diff --git a/go.mod b/go.mod index 2a4369da..54f11178 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/go-chi/chi v1.5.4 github.com/go-oauth2/oauth2/v4 v4.4.2 github.com/go-session/session v3.1.2+incompatible - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 @@ -22,22 +22,22 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/heetch/confita v0.10.0 github.com/karlseguin/ccache v2.0.3+incompatible - github.com/libp2p/go-libp2p v0.24.2 + github.com/libp2p/go-libp2p v0.27.8 github.com/libp2p/go-libp2p-kad-dht v0.20.0 github.com/mailjet/mailjet-apiv3-go/v3 v3.1.1 - github.com/multiformats/go-multiaddr v0.8.0 + github.com/multiformats/go-multiaddr v0.9.0 github.com/nicksnyder/go-i18n/v2 v2.1.2 github.com/prometheus/client_golang v1.14.0 github.com/satori/go.uuid v1.2.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/urfave/negroni v1.0.0 go.uber.org/zap v1.24.0 golang.org/x/crypto v0.17.0 golang.org/x/net v0.10.0 - golang.org/x/oauth2 v0.3.0 + golang.org/x/oauth2 v0.5.0 golang.org/x/text v0.14.0 google.golang.org/grpc v1.40.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/protobuf v1.30.0 ) require ( @@ -46,7 +46,7 @@ require ( github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect @@ -55,19 +55,19 @@ require ( github.com/elastic/gosigar v0.14.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/go-task/slim-sprig 
v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.1+incompatible // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/golang/mock v1.6.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b // indirect + github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/huin/goupnp v1.0.3 // indirect - github.com/ipfs/go-cid v0.3.2 // indirect + github.com/huin/goupnp v1.1.0 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-datastore v0.6.0 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect github.com/ipfs/go-ipns v0.3.0 // indirect @@ -78,29 +78,24 @@ require ( github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect github.com/karlseguin/expect v1.0.8 // indirect - github.com/klauspost/compress v1.15.14 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect + github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.1.0 // indirect 
github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-openssl v0.1.0 // indirect github.com/libp2p/go-reuseport v0.2.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.0 // indirect - github.com/lucas-clemente/quic-go v0.31.1 // indirect - github.com/marten-seemann/qtls-go1-18 v0.1.4 // indirect - github.com/marten-seemann/qtls-go1-19 v0.1.2 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-pointer v0.0.1 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/dns v1.1.53 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.0 // indirect @@ -109,12 +104,12 @@ require ( github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.7.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.8.1 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect - github.com/multiformats/go-multistream v0.3.3 // indirect + github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.7.0 // indirect + github.com/onsi/ginkgo/v2 v2.9.2 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -122,10 +117,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt 
v0.89.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect + github.com/quic-go/qpack v0.4.0 // indirect + github.com/quic-go/qtls-go1-19 v0.3.3 // indirect + github.com/quic-go/qtls-go1-20 v0.2.3 // indirect + github.com/quic-go/quic-go v0.33.0 // indirect + github.com/quic-go/webtransport-go v0.5.2 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect - github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/tidwall/btree v0.0.0-20191029221954-400434d76274 // indirect github.com/tidwall/buntdb v1.1.2 // indirect @@ -139,16 +138,17 @@ require ( go.opencensus.io v0.24.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/dig v1.16.1 // indirect - go.uber.org/fx v1.19.1 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4 // indirect - golang.org/x/mod v0.8.0 // indirect + go.uber.org/fx v1.19.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/mod v0.10.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.15.0 // indirect - golang.org/x/tools v0.6.0 // indirect + golang.org/x/tools v0.7.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210521181308-5ccab8a35a9a // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.1.7 // indirect + nhooyr.io/websocket v1.8.7 // indirect ) diff --git a/go.sum b/go.sum index 6e9941ce..0924b750 100644 --- a/go.sum +++ b/go.sum @@ -162,8 +162,8 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod 
h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.3+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -268,6 +268,10 @@ github.com/gcash/bchutil v0.0.0-20191012211144-98e73ec336ba/go.mod h1:nUIrcbbtEQ github.com/gcash/bchutil v0.0.0-20200506001747-c2894cd54b33/go.mod h1:wB++2ZcHUvGLN1OgO9swBmJK1vmyshJLW9SNS+apXwc= github.com/gcash/bchutil v0.0.0-20210113190856-6ea28dff4000/go.mod h1:H2USFGwtiu6CNMxiVQPqZkDzsoVSt9BLNqTfBBqGXRo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs= github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg= @@ -288,14 +292,21 @@ github.com/go-logr/logr v1.2.3 
h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-oauth2/oauth2/v4 v4.4.2 h1:tWQlR5I4/qhWiyOME67BAFmo622yi+2mm7DMm8DpMdg= github.com/go-oauth2/oauth2/v4 v4.4.2/go.mod h1:K4DemYzNwwYnIDOPdHtX/7SlO0AHdtlphsTgE7lA3PA= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-session/session v3.1.2+incompatible h1:yStchEObKg4nk2F7JGE7KoFIrA/1Y078peagMWcrncg= github.com/go-session/session v3.1.2+incompatible/go.mod h1:8B3iivBQjrz/JtC68Np2T1yBBLxTan3mn/3OM0CyRt0= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= @@ -314,6 +325,12 @@ github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2 github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -364,8 +381,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= @@ -418,8 +436,8 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b h1:8htHrh2bw9c7Idkb7YNac+ZpTqLMjRpI+FWu51ltaQc= -github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -528,8 +546,8 @@ github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbc github.com/huandu/xstrings v1.2.0/go.mod 
h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= +github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -540,8 +558,8 @@ github.com/improbable-eng/grpc-web v0.9.1/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6O github.com/improbable-eng/grpc-web v0.13.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= @@ -584,7 +602,9 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT 
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -607,25 +627,26 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.10/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.15.14 h1:i7WCKDToww0wA+9qrUZ1xOjp218vfFo3nTU6UHp+gOc= -github.com/klauspost/compress v1.15.14/go.mod 
h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= +github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -643,6 +664,8 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+ github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4= github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -653,10 +676,10 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.24.2 h1:iMViPIcLY0D6zr/f+1Yq9EavCZu2i7eDstsr1nEwSAk= -github.com/libp2p/go-libp2p v0.24.2/go.mod h1:WuxtL2V8yGjam03D93ZBC19tvOUiPpewYv1xdFGWu1k= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p v0.27.8 h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI= +github.com/libp2p/go-libp2p v0.27.8/go.mod h1:eCFFtd0s5i/EVKR7+5Ki8bM7qwkNW3TPTTSSW9sz8NE= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= 
github.com/libp2p/go-libp2p-kad-dht v0.20.0 h1:1bcMa74JFwExCHZMFEmjtHzxX5DovhJ07EtR6UOTEpc= github.com/libp2p/go-libp2p-kad-dht v0.20.0/go.mod h1:qPIXdiZsLczhV4/+4EO1jE8ae0YCW4ZOogc4WVIyTEU= github.com/libp2p/go-libp2p-kbucket v0.5.0 h1:g/7tVm8ACHDxH29BGrpsQlnNeu+6OF1A9bno/4/U1oA= @@ -671,8 +694,6 @@ github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-openssl v0.1.0 h1:LBkKEcUv6vtZIQLVTegAil8jbNpJErQ9AnT+bWV+Ooo= -github.com/libp2p/go-openssl v0.1.0/go.mod h1:OiOxwPpL3n4xlenjx2h7AwSGaFSC/KZvf6gNdOBQMtc= github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= github.com/libp2p/go-reuseport v0.2.0/go.mod h1:bvVho6eLMm6Bz5hmU0LYN3ixd3nPPvtIlaURZZgOY4k= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= @@ -681,8 +702,6 @@ github.com/libp2p/go-yamux/v4 v4.0.0/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lucas-clemente/quic-go v0.31.1 h1:O8Od7hfioqq0PMYHDyBkxU2aA7iZ2W9pjbrWuja2YR4= -github.com/lucas-clemente/quic-go v0.31.1/go.mod h1:0wFbizLgYzqHqtlyxyCaJKlE7bYgE6JQ+54TLd/Dq2g= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -692,14 +711,8 @@ github.com/mailjet/mailjet-apiv3-go/v3 v3.1.1 h1:cvU3k9reWbprpX94lcA1/Fd4RMc0bZx github.com/mailjet/mailjet-apiv3-go/v3 v3.1.1/go.mod h1:Nw3mVzRxV0CVDTlzaRcADGKt4PMNbT7gYIyEtjMrVIM= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= -github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE= -github.com/marten-seemann/qtls-go1-18 v0.1.4 h1:ogomB+lWV3Vmwiu6RTwDVTMGx+9j7SEi98e8QB35Its= -github.com/marten-seemann/qtls-go1-18 v0.1.4/go.mod h1:mJttiymBAByA49mhlNZZGrH5u1uXYZJ+RW28Py7f4m4= -github.com/marten-seemann/qtls-go1-19 v0.1.2 h1:ZevAEqKXH0bZmoOBPiqX2h5rhQ7cbZi+X+rlq2JUbCE= -github.com/marten-seemann/qtls-go1-19 v0.1.2/go.mod h1:5HTDWtVudo/WFsHKRNuOhWlbdjrfs5JHrYb0wIJqGpI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/marten-seemann/webtransport-go v0.4.3 h1:vkt5o/Ci+luknRteWdYGYH1KcB7ziup+J+1PzZJIvmg= github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -712,10 +725,8 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.17 
h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= -github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -733,8 +744,8 @@ github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00v github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= @@ -763,9 +774,11 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= @@ -783,22 +796,22 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= +github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= 
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ= -github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= +github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= -github.com/multiformats/go-multistream v0.3.3 h1:d5PZpjwRgVlbwfdTDjife7XszfZd8KYWfROYFlGcR8o= -github.com/multiformats/go-multistream v0.3.3/go.mod h1:ODRoqamLUsETKS9BNcII4gcRsJBU5VAwRIv7O39cEXg= +github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= +github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 
h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= @@ -843,8 +856,8 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= -github.com/onsi/ginkgo/v2 v2.7.0 h1:/XxtEV3I3Eif/HobnVx9YmJgk8ENdRsuUmM+fLCFNow= -github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -852,7 +865,7 @@ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= -github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -929,8 +942,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2 github.com/prometheus/common 
v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.25.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -956,6 +969,16 @@ github.com/quasilyte/go-ruleguard/rules v0.0.0-20210203162857-b223e0831f88/go.mo github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= +github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= +github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI= +github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= 
+github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= +github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= +github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= +github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1031,8 +1054,6 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -1071,8 +1092,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= @@ -1113,7 +1135,11 @@ github.com/tommy-muehle/go-mnd/v2 v2.3.2/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twitchtv/twirp v7.1.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1189,8 +1215,8 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod 
h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= -go.uber.org/fx v1.19.1 h1:JwYIYAQzXBuBBwSZ1/tn/95pnQO/Sp3yE8lWj9eSAzI= -go.uber.org/fx v1.19.1/go.mod h1:bGK+AEy7XUwTBkqCsK/vDyFF0JJOA6X5KWpNC0e6qTA= +go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= +go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1198,8 +1224,8 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= @@ -1247,8 +1273,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4 h1:CNkDRtCj8otM5CFz5jYvbr8ioXX8flVsLfDWEj0M5kk= -golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1274,8 +1300,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= +golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1331,7 +1357,6 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net 
v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210521195947-fe42d452be8f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1342,8 +1367,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1440,7 +1465,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210521203332-0cec03c779c1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1561,9 +1586,8 @@ golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0t golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2-0.20210512205948-8287d5da45e4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1694,8 +1718,8 @@ google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX7 google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8/go.mod 
h1:hFxJC2f0epmp1elRCiEGJTKAWbwxZ2nvqZdHl3FQXCY= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1751,6 +1775,8 @@ mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIa mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= mvdan.cc/unparam v0.0.0-20210520122750-2ac67f130a88/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md index eccb9d98..d2073af3 100644 --- a/vendor/github.com/containerd/cgroups/README.md +++ b/vendor/github.com/containerd/cgroups/README.md @@ -9,7 +9,7 @@ Go package for creating, managing, 
inspecting, and destroying cgroups. The resources format for settings on the cgroup uses the OCI runtime-spec found [here](https://github.com/opencontainers/runtime-spec). -## Examples +## Examples (v1) ### Create a new cgroup @@ -58,7 +58,7 @@ if err := control.Add(cgroups.Process{Pid:1234}); err != nil { } ``` -### Update the cgroup +### Update the cgroup To update the resources applied in the cgroup @@ -133,6 +133,61 @@ event := cgroups.OOMEvent() efd, err := control.RegisterMemoryEvent(event) ``` +## Examples (v2/unified) + +### Check that the current system is running cgroups v2 + +```go +var cgroupV2 bool +if cgroups.Mode() == cgroups.Unified { + cgroupV2 = true +} +``` + +### Create a new cgroup + +This creates a new systemd v2 cgroup slice. Systemd slices consider ["-" a special character](https://www.freedesktop.org/software/systemd/man/systemd.slice.html), +so the resulting slice would be located here on disk: + +* /sys/fs/cgroup/my.slice/my-cgroup.slice/my-cgroup-abc.slice + +```go +import ( + cgroupsv2 "github.com/containerd/cgroups/v2" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +res := cgroupsv2.Resources{} +// dummy PID of -1 is used for creating a "general slice" to be used as a parent cgroup. 
+// see https://github.com/containerd/cgroups/blob/1df78138f1e1e6ee593db155c6b369466f577651/v2/manager.go#L732-L735 +m, err := cgroupsv2.NewSystemd("/", "my-cgroup-abc.slice", -1, &res) +if err != nil { + return err +} +``` + +### Load an existing cgroup + +```go +m, err := cgroupsv2.LoadSystemd("/", "my-cgroup-abc.slice") +if err != nil { + return err +} +``` + +### Delete a cgroup + +```go +m, err := cgroupsv2.LoadSystemd("/", "my-cgroup-abc.slice") +if err != nil { + return err +} +err = m.DeleteSystemd() +if err != nil { + return err +} +``` + ### Attention All static path should not include `/sys/fs/cgroup/` prefix, it should start with your own cgroups name diff --git a/vendor/github.com/containerd/cgroups/Vagrantfile b/vendor/github.com/containerd/cgroups/Vagrantfile deleted file mode 100644 index 9a4aac8c..00000000 --- a/vendor/github.com/containerd/cgroups/Vagrantfile +++ /dev/null @@ -1,46 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure("2") do |config| -# Fedora box is used for testing cgroup v2 support - config.vm.box = "fedora/35-cloud-base" - config.vm.provider :virtualbox do |v| - v.memory = 4096 - v.cpus = 2 - end - config.vm.provider :libvirt do |v| - v.memory = 4096 - v.cpus = 2 - end - config.vm.provision "shell", inline: <<-SHELL - set -eux -o pipefail - # configuration - GO_VERSION="1.17.7" - - # install gcc and Golang - dnf -y install gcc - curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local - - # setup env vars - cat >> /etc/profile.d/sh.local < /test.sh < 0 { + errs = append(errs, fmt.Sprintf("%s (contains running processes)", string(s.Name()))) + continue + } if d, ok := s.(deleter); ok { sp, err := c.path(s.Name()) if err != nil { @@ -247,6 +256,7 @@ func (c *cgroup) Delete() error { if err := remove(path); err != nil { errs = append(errs, path) } + continue } } if len(errs) > 0 { diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go 
b/vendor/github.com/containerd/cgroups/cpuacct.go index e5fc864b..1022fa37 100644 --- a/vendor/github.com/containerd/cgroups/cpuacct.go +++ b/vendor/github.com/containerd/cgroups/cpuacct.go @@ -17,8 +17,9 @@ package cgroups import ( + "bufio" "fmt" - "io/ioutil" + "os" "path/filepath" "strconv" "strings" @@ -70,7 +71,7 @@ func (c *cpuacctController) Stat(path string, stats *v1.Metrics) error { func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) { var usage []uint64 - data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu")) + data, err := os.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu")) if err != nil { return nil, err } @@ -86,36 +87,41 @@ func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) { func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) { statPath := filepath.Join(c.Path(path), "cpuacct.stat") - data, err := ioutil.ReadFile(statPath) + f, err := os.Open(statPath) if err != nil { return 0, 0, err } - fields := strings.Fields(string(data)) - if len(fields) != 4 { - return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath) + defer f.Close() + var ( + raw = make(map[string]uint64) + sc = bufio.NewScanner(f) + ) + for sc.Scan() { + key, v, err := parseKV(sc.Text()) + if err != nil { + return 0, 0, err + } + raw[key] = v + } + if err := sc.Err(); err != nil { + return 0, 0, err } for _, t := range []struct { - index int name string value *uint64 }{ { - index: 0, name: "user", value: &user, }, { - index: 2, name: "system", value: &kernel, }, } { - if fields[t.index] != t.name { - return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath) - } - v, err := strconv.ParseUint(fields[t.index+1], 10, 64) - if err != nil { - return 0, 0, err + v, ok := raw[t.name] + if !ok { + return 0, 0, fmt.Errorf("expected field %q but not found in %q", t.name, statPath) } *t.value = v } diff --git 
a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go index 3cae173b..8b56d3db 100644 --- a/vendor/github.com/containerd/cgroups/cpuset.go +++ b/vendor/github.com/containerd/cgroups/cpuset.go @@ -19,7 +19,6 @@ package cgroups import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" @@ -87,10 +86,10 @@ func (c *cpusetController) Update(path string, resources *specs.LinuxResources) } func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) { - if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) { + if cpus, err = os.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) { return } - if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) { + if mems, err = os.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) { return } return cpus, mems, nil diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go index 59a7e712..5783f0dc 100644 --- a/vendor/github.com/containerd/cgroups/freezer.go +++ b/vendor/github.com/containerd/cgroups/freezer.go @@ -17,7 +17,7 @@ package cgroups import ( - "io/ioutil" + "os" "path/filepath" "strings" "time" @@ -58,7 +58,7 @@ func (f *freezerController) changeState(path string, state State) error { } func (f *freezerController) state(path string) (State, error) { - current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state")) + current, err := os.ReadFile(filepath.Join(f.root, path, "freezer.state")) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go index ce78e44c..66a1b6b4 100644 --- a/vendor/github.com/containerd/cgroups/pids.go +++ b/vendor/github.com/containerd/cgroups/pids.go @@ -17,7 +17,6 @@ package cgroups import ( - "io/ioutil" "os" "path/filepath" "strconv" 
@@ -69,7 +68,7 @@ func (p *pidsController) Stat(path string, stats *v1.Metrics) error { return err } var max uint64 - maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max")) + maxData, err := os.ReadFile(filepath.Join(p.Path(path), "pids.max")) if err != nil { return err } diff --git a/vendor/github.com/containerd/cgroups/rdma.go b/vendor/github.com/containerd/cgroups/rdma.go index 3b59b107..9d414203 100644 --- a/vendor/github.com/containerd/cgroups/rdma.go +++ b/vendor/github.com/containerd/cgroups/rdma.go @@ -17,7 +17,6 @@ package cgroups import ( - "io/ioutil" "math" "os" "path/filepath" @@ -126,13 +125,13 @@ func toRdmaEntry(strEntries []string) []*v1.RdmaEntry { func (p *rdmaController) Stat(path string, stats *v1.Metrics) error { - currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current")) + currentData, err := os.ReadFile(filepath.Join(p.Path(path), "rdma.current")) if err != nil { return err } currentPerDevices := strings.Split(string(currentData), "\n") - maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max")) + maxData, err := os.ReadFile(filepath.Join(p.Path(path), "rdma.max")) if err != nil { return err } diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go index 21713897..c17a3a41 100644 --- a/vendor/github.com/containerd/cgroups/utils.go +++ b/vendor/github.com/containerd/cgroups/utils.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strconv" @@ -200,7 +199,7 @@ func hugePageSizes() ([]string, error) { pageSizes []string sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"} ) - files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") + files, err := os.ReadDir("/sys/kernel/mm/hugepages") if err != nil { return nil, err } @@ -216,7 +215,7 @@ func hugePageSizes() ([]string, error) { } func readUint(path string) (uint64, error) { - v, err := ioutil.ReadFile(path) + v, err := os.ReadFile(path) if err != nil { 
return 0, err } @@ -382,7 +381,7 @@ func retryingWriteFile(path string, data []byte, mode os.FileMode) error { // Retry writes on EINTR; see: // https://github.com/golang/go/issues/38033 for { - err := ioutil.WriteFile(path, data, mode) + err := os.WriteFile(path, data, mode) if err == nil { return nil } else if !errors.Is(err, syscall.EINTR) { diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go b/vendor/github.com/golang/protobuf/jsonpb/decode.go index 60e82caa..6c16c255 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/decode.go +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -386,8 +386,14 @@ func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error } func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } if md := fd.Message(); md != nil { - return md.FullName() == "google.protobuf.Value" && fd.Cardinality() != protoreflect.Repeated + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" } return false } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 4ec00fe7..60ef7e92 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -72,9 +72,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. 
+ Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -715,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. 
func (s *Sample) DiffBaseSample() bool { diff --git a/vendor/github.com/huin/goupnp/README.md b/vendor/github.com/huin/goupnp/README.md index cd837978..49bd0388 100644 --- a/vendor/github.com/huin/goupnp/README.md +++ b/vendor/github.com/huin/goupnp/README.md @@ -63,3 +63,14 @@ func init() { goupnp.CharsetReaderFault = charset.NewReaderLabel } ``` + +## `v2alpha` + +The `v2alpha` subdirectory contains experimental work on a version 2 API. The plan is to eventually +create a `v2` subdirectory with a stable version of the version 2 API. The v1 API will stay where +it currently is. + +> NOTE: +> +> * `v2alpha` will be deleted one day, so don't rely on it always existing. +> * `v2alpha` will have API breaking changes, even with itself. diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go b/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go index 2b146a34..e6af2bb1 100644 --- a/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go +++ b/vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go @@ -1,2 +1,2 @@ -//go:generate goupnpdcpgen -dcp_name internetgateway1 +//go:generate goupnpdcpgen -dcp_name internetgateway1 -code_tmpl_file ../dcps.gotemplate package internetgateway1 diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go b/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go index 942bb092..098083b0 100644 --- a/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go +++ b/vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go @@ -49,35 +49,47 @@ type LANHostConfigManagement1 struct { goupnp.ServiceClient } -// NewLANHostConfigManagement1Clients discovers instances of the service on the network, +// NewLANHostConfigManagement1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. 
errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) { +func NewLANHostConfigManagement1ClientsCtx(ctx context.Context) (clients []*LANHostConfigManagement1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_LANHostConfigManagement_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_LANHostConfigManagement_1); err != nil { return } clients = newLANHostConfigManagement1ClientsFromGenericClients(genericClients) return } -// NewLANHostConfigManagement1ClientsByURL discovers instances of the service at the given +// NewLANHostConfigManagement1Clients is the legacy version of NewLANHostConfigManagement1ClientsCtx, but uses +// context.Background() as the context. +func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) { + return NewLANHostConfigManagement1ClientsCtx(context.Background()) +} + +// NewLANHostConfigManagement1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_LANHostConfigManagement_1) +func NewLANHostConfigManagement1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*LANHostConfigManagement1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_LANHostConfigManagement_1) if err != nil { return nil, err } return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil } +// NewLANHostConfigManagement1ClientsByURL is the legacy version of NewLANHostConfigManagement1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) { + return NewLANHostConfigManagement1ClientsByURLCtx(context.Background(), loc) +} + // NewLANHostConfigManagement1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -798,35 +810,47 @@ type Layer3Forwarding1 struct { goupnp.ServiceClient } -// NewLayer3Forwarding1Clients discovers instances of the service on the network, +// NewLayer3Forwarding1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) { +func NewLayer3Forwarding1ClientsCtx(ctx context.Context) (clients []*Layer3Forwarding1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_Layer3Forwarding_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_Layer3Forwarding_1); err != nil { return } clients = newLayer3Forwarding1ClientsFromGenericClients(genericClients) return } -// NewLayer3Forwarding1ClientsByURL discovers instances of the service at the given +// NewLayer3Forwarding1Clients is the legacy version of NewLayer3Forwarding1ClientsCtx, but uses +// context.Background() as the context. +func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) { + return NewLayer3Forwarding1ClientsCtx(context.Background()) +} + +// NewLayer3Forwarding1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_Layer3Forwarding_1) +func NewLayer3Forwarding1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*Layer3Forwarding1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_Layer3Forwarding_1) if err != nil { return nil, err } return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil } +// NewLayer3Forwarding1ClientsByURL is the legacy version of NewLayer3Forwarding1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) { + return NewLayer3Forwarding1ClientsByURLCtx(context.Background(), loc) +} + // NewLayer3Forwarding1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -929,35 +953,47 @@ type WANCableLinkConfig1 struct { goupnp.ServiceClient } -// NewWANCableLinkConfig1Clients discovers instances of the service on the network, +// NewWANCableLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) { +func NewWANCableLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANCableLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCableLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCableLinkConfig_1); err != nil { return } clients = newWANCableLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANCableLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANCableLinkConfig1Clients is the legacy version of NewWANCableLinkConfig1ClientsCtx, but uses +// context.Background() as the context. 
+func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) { + return NewWANCableLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANCableLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCableLinkConfig_1) +func NewWANCableLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCableLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCableLinkConfig_1) if err != nil { return nil, err } return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANCableLinkConfig1ClientsByURL is the legacy version of NewWANCableLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) { + return NewWANCableLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANCableLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -1347,35 +1383,47 @@ type WANCommonInterfaceConfig1 struct { goupnp.ServiceClient } -// NewWANCommonInterfaceConfig1Clients discovers instances of the service on the network, +// NewWANCommonInterfaceConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. 
errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) { +func NewWANCommonInterfaceConfig1ClientsCtx(ctx context.Context) (clients []*WANCommonInterfaceConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCommonInterfaceConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCommonInterfaceConfig_1); err != nil { return } clients = newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients) return } -// NewWANCommonInterfaceConfig1ClientsByURL discovers instances of the service at the given +// NewWANCommonInterfaceConfig1Clients is the legacy version of NewWANCommonInterfaceConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) { + return NewWANCommonInterfaceConfig1ClientsCtx(context.Background()) +} + +// NewWANCommonInterfaceConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCommonInterfaceConfig_1) +func NewWANCommonInterfaceConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCommonInterfaceConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCommonInterfaceConfig_1) if err != nil { return nil, err } return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANCommonInterfaceConfig1ClientsByURL is the legacy version of NewWANCommonInterfaceConfig1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) { + return NewWANCommonInterfaceConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANCommonInterfaceConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -1784,35 +1832,47 @@ type WANDSLLinkConfig1 struct { goupnp.ServiceClient } -// NewWANDSLLinkConfig1Clients discovers instances of the service on the network, +// NewWANDSLLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) { +func NewWANDSLLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANDSLLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANDSLLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANDSLLinkConfig_1); err != nil { return } clients = newWANDSLLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANDSLLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANDSLLinkConfig1Clients is the legacy version of NewWANDSLLinkConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) { + return NewWANDSLLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANDSLLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANDSLLinkConfig_1) +func NewWANDSLLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANDSLLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANDSLLinkConfig_1) if err != nil { return nil, err } return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANDSLLinkConfig1ClientsByURL is the legacy version of NewWANDSLLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) { + return NewWANDSLLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANDSLLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -2204,35 +2264,47 @@ type WANEthernetLinkConfig1 struct { goupnp.ServiceClient } -// NewWANEthernetLinkConfig1Clients discovers instances of the service on the network, +// NewWANEthernetLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) { +func NewWANEthernetLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANEthernetLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANEthernetLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANEthernetLinkConfig_1); err != nil { return } clients = newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANEthernetLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANEthernetLinkConfig1Clients is the legacy version of NewWANEthernetLinkConfig1ClientsCtx, but uses +// context.Background() as the context. 
+func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) { + return NewWANEthernetLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANEthernetLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANEthernetLinkConfig_1) +func NewWANEthernetLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANEthernetLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANEthernetLinkConfig_1) if err != nil { return nil, err } return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANEthernetLinkConfig1ClientsByURL is the legacy version of NewWANEthernetLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) { + return NewWANEthernetLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANEthernetLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -2302,35 +2374,47 @@ type WANIPConnection1 struct { goupnp.ServiceClient } -// NewWANIPConnection1Clients discovers instances of the service on the network, +// NewWANIPConnection1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. 
errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) { +func NewWANIPConnection1ClientsCtx(ctx context.Context) (clients []*WANIPConnection1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPConnection_1); err != nil { return } clients = newWANIPConnection1ClientsFromGenericClients(genericClients) return } -// NewWANIPConnection1ClientsByURL discovers instances of the service at the given +// NewWANIPConnection1Clients is the legacy version of NewWANIPConnection1ClientsCtx, but uses +// context.Background() as the context. +func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) { + return NewWANIPConnection1ClientsCtx(context.Background()) +} + +// NewWANIPConnection1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_1) +func NewWANIPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPConnection1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPConnection_1) if err != nil { return nil, err } return newWANIPConnection1ClientsFromGenericClients(genericClients), nil } +// NewWANIPConnection1ClientsByURL is the legacy version of NewWANIPConnection1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) { + return NewWANIPConnection1ClientsByURLCtx(context.Background(), loc) +} + // NewWANIPConnection1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -3148,35 +3232,47 @@ type WANPOTSLinkConfig1 struct { goupnp.ServiceClient } -// NewWANPOTSLinkConfig1Clients discovers instances of the service on the network, +// NewWANPOTSLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) { +func NewWANPOTSLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANPOTSLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPOTSLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPOTSLinkConfig_1); err != nil { return } clients = newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANPOTSLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANPOTSLinkConfig1Clients is the legacy version of NewWANPOTSLinkConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) { + return NewWANPOTSLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANPOTSLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPOTSLinkConfig_1) +func NewWANPOTSLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPOTSLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPOTSLinkConfig_1) if err != nil { return nil, err } return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANPOTSLinkConfig1ClientsByURL is the legacy version of NewWANPOTSLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) { + return NewWANPOTSLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANPOTSLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -3559,35 +3655,47 @@ type WANPPPConnection1 struct { goupnp.ServiceClient } -// NewWANPPPConnection1Clients discovers instances of the service on the network, +// NewWANPPPConnection1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) { +func NewWANPPPConnection1ClientsCtx(ctx context.Context) (clients []*WANPPPConnection1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPPPConnection_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPPPConnection_1); err != nil { return } clients = newWANPPPConnection1ClientsFromGenericClients(genericClients) return } -// NewWANPPPConnection1ClientsByURL discovers instances of the service at the given +// NewWANPPPConnection1Clients is the legacy version of NewWANPPPConnection1ClientsCtx, but uses +// context.Background() as the context. +func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) { + return NewWANPPPConnection1ClientsCtx(context.Background()) +} + +// NewWANPPPConnection1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. 
An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPPPConnection_1) +func NewWANPPPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPPPConnection1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPPPConnection_1) if err != nil { return nil, err } return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil } +// NewWANPPPConnection1ClientsByURL is the legacy version of NewWANPPPConnection1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) { + return NewWANPPPConnection1ClientsByURLCtx(context.Background(), loc) +} + // NewWANPPPConnection1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. 
An error is // returned if there was not at least one instance of the service within the diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go b/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go index 752058b4..88f8d77a 100644 --- a/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go +++ b/vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go @@ -1,2 +1,2 @@ -//go:generate goupnpdcpgen -dcp_name internetgateway2 +//go:generate goupnpdcpgen -dcp_name internetgateway2 -code_tmpl_file ../dcps.gotemplate package internetgateway2 diff --git a/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go b/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go index e79d7824..42a15786 100644 --- a/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go +++ b/vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go @@ -54,35 +54,47 @@ type DeviceProtection1 struct { goupnp.ServiceClient } -// NewDeviceProtection1Clients discovers instances of the service on the network, +// NewDeviceProtection1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewDeviceProtection1Clients() (clients []*DeviceProtection1, errors []error, err error) { +func NewDeviceProtection1ClientsCtx(ctx context.Context) (clients []*DeviceProtection1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_DeviceProtection_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_DeviceProtection_1); err != nil { return } clients = newDeviceProtection1ClientsFromGenericClients(genericClients) return } -// NewDeviceProtection1ClientsByURL discovers instances of the service at the given +// NewDeviceProtection1Clients is the legacy version of NewDeviceProtection1ClientsCtx, but uses +// context.Background() as the context. +func NewDeviceProtection1Clients() (clients []*DeviceProtection1, errors []error, err error) { + return NewDeviceProtection1ClientsCtx(context.Background()) +} + +// NewDeviceProtection1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewDeviceProtection1ClientsByURL(loc *url.URL) ([]*DeviceProtection1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_DeviceProtection_1) +func NewDeviceProtection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*DeviceProtection1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_DeviceProtection_1) if err != nil { return nil, err } return newDeviceProtection1ClientsFromGenericClients(genericClients), nil } +// NewDeviceProtection1ClientsByURL is the legacy version of NewDeviceProtection1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewDeviceProtection1ClientsByURL(loc *url.URL) ([]*DeviceProtection1, error) { + return NewDeviceProtection1ClientsByURLCtx(context.Background(), loc) +} + // NewDeviceProtection1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -672,35 +684,47 @@ type LANHostConfigManagement1 struct { goupnp.ServiceClient } -// NewLANHostConfigManagement1Clients discovers instances of the service on the network, +// NewLANHostConfigManagement1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) { +func NewLANHostConfigManagement1ClientsCtx(ctx context.Context) (clients []*LANHostConfigManagement1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_LANHostConfigManagement_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_LANHostConfigManagement_1); err != nil { return } clients = newLANHostConfigManagement1ClientsFromGenericClients(genericClients) return } -// NewLANHostConfigManagement1ClientsByURL discovers instances of the service at the given +// NewLANHostConfigManagement1Clients is the legacy version of NewLANHostConfigManagement1ClientsCtx, but uses +// context.Background() as the context. 
+func NewLANHostConfigManagement1Clients() (clients []*LANHostConfigManagement1, errors []error, err error) { + return NewLANHostConfigManagement1ClientsCtx(context.Background()) +} + +// NewLANHostConfigManagement1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_LANHostConfigManagement_1) +func NewLANHostConfigManagement1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*LANHostConfigManagement1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_LANHostConfigManagement_1) if err != nil { return nil, err } return newLANHostConfigManagement1ClientsFromGenericClients(genericClients), nil } +// NewLANHostConfigManagement1ClientsByURL is the legacy version of NewLANHostConfigManagement1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewLANHostConfigManagement1ClientsByURL(loc *url.URL) ([]*LANHostConfigManagement1, error) { + return NewLANHostConfigManagement1ClientsByURLCtx(context.Background(), loc) +} + // NewLANHostConfigManagement1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -1421,35 +1445,47 @@ type Layer3Forwarding1 struct { goupnp.ServiceClient } -// NewLayer3Forwarding1Clients discovers instances of the service on the network, +// NewLayer3Forwarding1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. 
errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) { +func NewLayer3Forwarding1ClientsCtx(ctx context.Context) (clients []*Layer3Forwarding1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_Layer3Forwarding_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_Layer3Forwarding_1); err != nil { return } clients = newLayer3Forwarding1ClientsFromGenericClients(genericClients) return } -// NewLayer3Forwarding1ClientsByURL discovers instances of the service at the given +// NewLayer3Forwarding1Clients is the legacy version of NewLayer3Forwarding1ClientsCtx, but uses +// context.Background() as the context. +func NewLayer3Forwarding1Clients() (clients []*Layer3Forwarding1, errors []error, err error) { + return NewLayer3Forwarding1ClientsCtx(context.Background()) +} + +// NewLayer3Forwarding1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_Layer3Forwarding_1) +func NewLayer3Forwarding1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*Layer3Forwarding1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_Layer3Forwarding_1) if err != nil { return nil, err } return newLayer3Forwarding1ClientsFromGenericClients(genericClients), nil } +// NewLayer3Forwarding1ClientsByURL is the legacy version of NewLayer3Forwarding1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewLayer3Forwarding1ClientsByURL(loc *url.URL) ([]*Layer3Forwarding1, error) { + return NewLayer3Forwarding1ClientsByURLCtx(context.Background(), loc) +} + // NewLayer3Forwarding1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -1552,35 +1588,47 @@ type WANCableLinkConfig1 struct { goupnp.ServiceClient } -// NewWANCableLinkConfig1Clients discovers instances of the service on the network, +// NewWANCableLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) { +func NewWANCableLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANCableLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCableLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCableLinkConfig_1); err != nil { return } clients = newWANCableLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANCableLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANCableLinkConfig1Clients is the legacy version of NewWANCableLinkConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANCableLinkConfig1Clients() (clients []*WANCableLinkConfig1, errors []error, err error) { + return NewWANCableLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANCableLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCableLinkConfig_1) +func NewWANCableLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCableLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCableLinkConfig_1) if err != nil { return nil, err } return newWANCableLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANCableLinkConfig1ClientsByURL is the legacy version of NewWANCableLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewWANCableLinkConfig1ClientsByURL(loc *url.URL) ([]*WANCableLinkConfig1, error) { + return NewWANCableLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANCableLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -1970,35 +2018,47 @@ type WANCommonInterfaceConfig1 struct { goupnp.ServiceClient } -// NewWANCommonInterfaceConfig1Clients discovers instances of the service on the network, +// NewWANCommonInterfaceConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) { +func NewWANCommonInterfaceConfig1ClientsCtx(ctx context.Context) (clients []*WANCommonInterfaceConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANCommonInterfaceConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANCommonInterfaceConfig_1); err != nil { return } clients = newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients) return } -// NewWANCommonInterfaceConfig1ClientsByURL discovers instances of the service at the given +// NewWANCommonInterfaceConfig1Clients is the legacy version of NewWANCommonInterfaceConfig1ClientsCtx, but uses +// context.Background() as the context. 
+func NewWANCommonInterfaceConfig1Clients() (clients []*WANCommonInterfaceConfig1, errors []error, err error) { + return NewWANCommonInterfaceConfig1ClientsCtx(context.Background()) +} + +// NewWANCommonInterfaceConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANCommonInterfaceConfig_1) +func NewWANCommonInterfaceConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANCommonInterfaceConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANCommonInterfaceConfig_1) if err != nil { return nil, err } return newWANCommonInterfaceConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANCommonInterfaceConfig1ClientsByURL is the legacy version of NewWANCommonInterfaceConfig1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANCommonInterfaceConfig1ClientsByURL(loc *url.URL) ([]*WANCommonInterfaceConfig1, error) { + return NewWANCommonInterfaceConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANCommonInterfaceConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -2407,35 +2467,47 @@ type WANDSLLinkConfig1 struct { goupnp.ServiceClient } -// NewWANDSLLinkConfig1Clients discovers instances of the service on the network, +// NewWANDSLLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. 
errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) { +func NewWANDSLLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANDSLLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANDSLLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANDSLLinkConfig_1); err != nil { return } clients = newWANDSLLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANDSLLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANDSLLinkConfig1Clients is the legacy version of NewWANDSLLinkConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANDSLLinkConfig1Clients() (clients []*WANDSLLinkConfig1, errors []error, err error) { + return NewWANDSLLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANDSLLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANDSLLinkConfig_1) +func NewWANDSLLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANDSLLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANDSLLinkConfig_1) if err != nil { return nil, err } return newWANDSLLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANDSLLinkConfig1ClientsByURL is the legacy version of NewWANDSLLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANDSLLinkConfig1ClientsByURL(loc *url.URL) ([]*WANDSLLinkConfig1, error) { + return NewWANDSLLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANDSLLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -2827,35 +2899,47 @@ type WANEthernetLinkConfig1 struct { goupnp.ServiceClient } -// NewWANEthernetLinkConfig1Clients discovers instances of the service on the network, +// NewWANEthernetLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) { +func NewWANEthernetLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANEthernetLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANEthernetLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANEthernetLinkConfig_1); err != nil { return } clients = newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANEthernetLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANEthernetLinkConfig1Clients is the legacy version of NewWANEthernetLinkConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANEthernetLinkConfig1Clients() (clients []*WANEthernetLinkConfig1, errors []error, err error) { + return NewWANEthernetLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANEthernetLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANEthernetLinkConfig_1) +func NewWANEthernetLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANEthernetLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANEthernetLinkConfig_1) if err != nil { return nil, err } return newWANEthernetLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANEthernetLinkConfig1ClientsByURL is the legacy version of NewWANEthernetLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANEthernetLinkConfig1ClientsByURL(loc *url.URL) ([]*WANEthernetLinkConfig1, error) { + return NewWANEthernetLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANEthernetLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -2925,35 +3009,47 @@ type WANIPConnection1 struct { goupnp.ServiceClient } -// NewWANIPConnection1Clients discovers instances of the service on the network, +// NewWANIPConnection1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) { +func NewWANIPConnection1ClientsCtx(ctx context.Context) (clients []*WANIPConnection1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPConnection_1); err != nil { return } clients = newWANIPConnection1ClientsFromGenericClients(genericClients) return } -// NewWANIPConnection1ClientsByURL discovers instances of the service at the given +// NewWANIPConnection1Clients is the legacy version of NewWANIPConnection1ClientsCtx, but uses +// context.Background() as the context. +func NewWANIPConnection1Clients() (clients []*WANIPConnection1, errors []error, err error) { + return NewWANIPConnection1ClientsCtx(context.Background()) +} + +// NewWANIPConnection1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_1) +func NewWANIPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPConnection1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPConnection_1) if err != nil { return nil, err } return newWANIPConnection1ClientsFromGenericClients(genericClients), nil } +// NewWANIPConnection1ClientsByURL is the legacy version of NewWANIPConnection1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewWANIPConnection1ClientsByURL(loc *url.URL) ([]*WANIPConnection1, error) { + return NewWANIPConnection1ClientsByURLCtx(context.Background(), loc) +} + // NewWANIPConnection1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -3771,35 +3867,47 @@ type WANIPConnection2 struct { goupnp.ServiceClient } -// NewWANIPConnection2Clients discovers instances of the service on the network, +// NewWANIPConnection2ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANIPConnection2Clients() (clients []*WANIPConnection2, errors []error, err error) { +func NewWANIPConnection2ClientsCtx(ctx context.Context) (clients []*WANIPConnection2, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPConnection_2); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPConnection_2); err != nil { return } clients = newWANIPConnection2ClientsFromGenericClients(genericClients) return } -// NewWANIPConnection2ClientsByURL discovers instances of the service at the given +// NewWANIPConnection2Clients is the legacy version of NewWANIPConnection2ClientsCtx, but uses +// context.Background() as the context. +func NewWANIPConnection2Clients() (clients []*WANIPConnection2, errors []error, err error) { + return NewWANIPConnection2ClientsCtx(context.Background()) +} + +// NewWANIPConnection2ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. 
An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANIPConnection2ClientsByURL(loc *url.URL) ([]*WANIPConnection2, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPConnection_2) +func NewWANIPConnection2ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPConnection2, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPConnection_2) if err != nil { return nil, err } return newWANIPConnection2ClientsFromGenericClients(genericClients), nil } +// NewWANIPConnection2ClientsByURL is the legacy version of NewWANIPConnection2ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANIPConnection2ClientsByURL(loc *url.URL) ([]*WANIPConnection2, error) { + return NewWANIPConnection2ClientsByURLCtx(context.Background(), loc) +} + // NewWANIPConnection2ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -4833,35 +4941,47 @@ type WANIPv6FirewallControl1 struct { goupnp.ServiceClient } -// NewWANIPv6FirewallControl1Clients discovers instances of the service on the network, +// NewWANIPv6FirewallControl1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANIPv6FirewallControl1Clients() (clients []*WANIPv6FirewallControl1, errors []error, err error) { +func NewWANIPv6FirewallControl1ClientsCtx(ctx context.Context) (clients []*WANIPv6FirewallControl1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANIPv6FirewallControl_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANIPv6FirewallControl_1); err != nil { return } clients = newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients) return } -// NewWANIPv6FirewallControl1ClientsByURL discovers instances of the service at the given +// NewWANIPv6FirewallControl1Clients is the legacy version of NewWANIPv6FirewallControl1ClientsCtx, but uses +// context.Background() as the context. +func NewWANIPv6FirewallControl1Clients() (clients []*WANIPv6FirewallControl1, errors []error, err error) { + return NewWANIPv6FirewallControl1ClientsCtx(context.Background()) +} + +// NewWANIPv6FirewallControl1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. 
-func NewWANIPv6FirewallControl1ClientsByURL(loc *url.URL) ([]*WANIPv6FirewallControl1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANIPv6FirewallControl_1) +func NewWANIPv6FirewallControl1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANIPv6FirewallControl1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANIPv6FirewallControl_1) if err != nil { return nil, err } return newWANIPv6FirewallControl1ClientsFromGenericClients(genericClients), nil } +// NewWANIPv6FirewallControl1ClientsByURL is the legacy version of NewWANIPv6FirewallControl1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANIPv6FirewallControl1ClientsByURL(loc *url.URL) ([]*WANIPv6FirewallControl1, error) { + return NewWANIPv6FirewallControl1ClientsByURLCtx(context.Background(), loc) +} + // NewWANIPv6FirewallControl1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -5243,35 +5363,47 @@ type WANPOTSLinkConfig1 struct { goupnp.ServiceClient } -// NewWANPOTSLinkConfig1Clients discovers instances of the service on the network, +// NewWANPOTSLinkConfig1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. 
-func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) { +func NewWANPOTSLinkConfig1ClientsCtx(ctx context.Context) (clients []*WANPOTSLinkConfig1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPOTSLinkConfig_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPOTSLinkConfig_1); err != nil { return } clients = newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients) return } -// NewWANPOTSLinkConfig1ClientsByURL discovers instances of the service at the given +// NewWANPOTSLinkConfig1Clients is the legacy version of NewWANPOTSLinkConfig1ClientsCtx, but uses +// context.Background() as the context. +func NewWANPOTSLinkConfig1Clients() (clients []*WANPOTSLinkConfig1, errors []error, err error) { + return NewWANPOTSLinkConfig1ClientsCtx(context.Background()) +} + +// NewWANPOTSLinkConfig1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPOTSLinkConfig_1) +func NewWANPOTSLinkConfig1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPOTSLinkConfig1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPOTSLinkConfig_1) if err != nil { return nil, err } return newWANPOTSLinkConfig1ClientsFromGenericClients(genericClients), nil } +// NewWANPOTSLinkConfig1ClientsByURL is the legacy version of NewWANPOTSLinkConfig1ClientsByURLCtx, but uses +// context.Background() as the context. 
+func NewWANPOTSLinkConfig1ClientsByURL(loc *url.URL) ([]*WANPOTSLinkConfig1, error) { + return NewWANPOTSLinkConfig1ClientsByURLCtx(context.Background(), loc) +} + // NewWANPOTSLinkConfig1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. An error is // returned if there was not at least one instance of the service within the @@ -5654,35 +5786,47 @@ type WANPPPConnection1 struct { goupnp.ServiceClient } -// NewWANPPPConnection1Clients discovers instances of the service on the network, +// NewWANPPPConnection1ClientsCtx discovers instances of the service on the network, // and returns clients to any that are found. errors will contain an error for // any devices that replied but which could not be queried, and err will be set // if the discovery process failed outright. // // This is a typical entry calling point into this package. -func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) { +func NewWANPPPConnection1ClientsCtx(ctx context.Context) (clients []*WANPPPConnection1, errors []error, err error) { var genericClients []goupnp.ServiceClient - if genericClients, errors, err = goupnp.NewServiceClients(URN_WANPPPConnection_1); err != nil { + if genericClients, errors, err = goupnp.NewServiceClientsCtx(ctx, URN_WANPPPConnection_1); err != nil { return } clients = newWANPPPConnection1ClientsFromGenericClients(genericClients) return } -// NewWANPPPConnection1ClientsByURL discovers instances of the service at the given +// NewWANPPPConnection1Clients is the legacy version of NewWANPPPConnection1ClientsCtx, but uses +// context.Background() as the context. +func NewWANPPPConnection1Clients() (clients []*WANPPPConnection1, errors []error, err error) { + return NewWANPPPConnection1ClientsCtx(context.Background()) +} + +// NewWANPPPConnection1ClientsByURLCtx discovers instances of the service at the given // URL, and returns clients to any that are found. 
An error is returned if // there was an error probing the service. // // This is a typical entry calling point into this package when reusing an // previously discovered service URL. -func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) { - genericClients, err := goupnp.NewServiceClientsByURL(loc, URN_WANPPPConnection_1) +func NewWANPPPConnection1ClientsByURLCtx(ctx context.Context, loc *url.URL) ([]*WANPPPConnection1, error) { + genericClients, err := goupnp.NewServiceClientsByURLCtx(ctx, loc, URN_WANPPPConnection_1) if err != nil { return nil, err } return newWANPPPConnection1ClientsFromGenericClients(genericClients), nil } +// NewWANPPPConnection1ClientsByURL is the legacy version of NewWANPPPConnection1ClientsByURLCtx, but uses +// context.Background() as the context. +func NewWANPPPConnection1ClientsByURL(loc *url.URL) ([]*WANPPPConnection1, error) { + return NewWANPPPConnection1ClientsByURLCtx(context.Background(), loc) +} + // NewWANPPPConnection1ClientsFromRootDevice discovers instances of the service in // a given root device, and returns clients to any that are found. 
An error is // returned if there was not at least one instance of the service within the diff --git a/vendor/github.com/huin/goupnp/device.go b/vendor/github.com/huin/goupnp/device.go index 334e787c..65f5635a 100644 --- a/vendor/github.com/huin/goupnp/device.go +++ b/vendor/github.com/huin/goupnp/device.go @@ -3,6 +3,7 @@ package goupnp import ( + "context" "encoding/xml" "errors" "fmt" @@ -51,6 +52,7 @@ type Device struct { ModelDescription string `xml:"modelDescription"` ModelName string `xml:"modelName"` ModelNumber string `xml:"modelNumber"` + ModelType string `xml:"modelType"` ModelURL URLField `xml:"modelURL"` SerialNumber string `xml:"serialNumber"` UDN string `xml:"UDN"` @@ -148,19 +150,25 @@ func (srv *Service) String() string { return fmt.Sprintf("Service ID %s : %s", srv.ServiceId, srv.ServiceType) } -// RequestSCPD requests the SCPD (soap actions and state variables description) +// RequestSCPDCtx requests the SCPD (soap actions and state variables description) // for the service. -func (srv *Service) RequestSCPD() (*scpd.SCPD, error) { +func (srv *Service) RequestSCPDCtx(ctx context.Context) (*scpd.SCPD, error) { if !srv.SCPDURL.Ok { return nil, errors.New("bad/missing SCPD URL, or no URLBase has been set") } s := new(scpd.SCPD) - if err := requestXml(srv.SCPDURL.URL.String(), scpd.SCPDXMLNamespace, s); err != nil { + if err := requestXml(ctx, srv.SCPDURL.URL.String(), scpd.SCPDXMLNamespace, s); err != nil { return nil, err } return s, nil } +// RequestSCPD is the legacy version of RequestSCPDCtx, but uses +// context.Background() as the context. +func (srv *Service) RequestSCPD() (*scpd.SCPD, error) { + return srv.RequestSCPDCtx(context.Background()) +} + // RequestSCDP is for compatibility only, prefer RequestSCPD. This was a // misspelling of RequestSCDP. 
func (srv *Service) RequestSCDP() (*scpd.SCPD, error) { diff --git a/vendor/github.com/huin/goupnp/go.work b/vendor/github.com/huin/goupnp/go.work new file mode 100644 index 00000000..9b7d1ff7 --- /dev/null +++ b/vendor/github.com/huin/goupnp/go.work @@ -0,0 +1,6 @@ +go 1.18 + +use ( + . + ./v2alpha +) diff --git a/vendor/github.com/huin/goupnp/goupnp.go b/vendor/github.com/huin/goupnp/goupnp.go index 51963de3..93c588b0 100644 --- a/vendor/github.com/huin/goupnp/goupnp.go +++ b/vendor/github.com/huin/goupnp/goupnp.go @@ -15,6 +15,7 @@ package goupnp import ( + "context" "encoding/xml" "fmt" "io" @@ -72,19 +73,19 @@ type MaybeRootDevice struct { Err error } -// DiscoverDevices attempts to find targets of the given type. This is +// DiscoverDevicesCtx attempts to find targets of the given type. This is // typically the entry-point for this package. searchTarget is typically a URN // in the form "urn:schemas-upnp-org:device:..." or // "urn:schemas-upnp-org:service:...". A single error is returned for errors // while attempting to send the query. An error or RootDevice is returned for // each discovered RootDevice. 
-func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) { +func DiscoverDevicesCtx(ctx context.Context, searchTarget string) ([]MaybeRootDevice, error) { hc, hcCleanup, err := httpuClient() if err != nil { return nil, err } defer hcCleanup() - responses, err := ssdp.SSDPRawSearch(hc, string(searchTarget), 2, 3) + responses, err := ssdp.SSDPRawSearchCtx(ctx, hc, string(searchTarget), 2, 3) if err != nil { return nil, err } @@ -99,7 +100,7 @@ func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) { continue } maybe.Location = loc - if root, err := DeviceByURL(loc); err != nil { + if root, err := DeviceByURLCtx(ctx, loc); err != nil { maybe.Err = err } else { maybe.Root = root @@ -112,10 +113,16 @@ func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) { return results, nil } -func DeviceByURL(loc *url.URL) (*RootDevice, error) { +// DiscoverDevices is the legacy version of DiscoverDevicesCtx, but uses +// context.Background() as the context. +func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) { + return DiscoverDevicesCtx(context.Background(), searchTarget) +} + +func DeviceByURLCtx(ctx context.Context, loc *url.URL) (*RootDevice, error) { locStr := loc.String() root := new(RootDevice) - if err := requestXml(locStr, DeviceXMLNamespace, root); err != nil { + if err := requestXml(ctx, locStr, DeviceXMLNamespace, root); err != nil { return nil, ContextError{fmt.Sprintf("error requesting root device details from %q", locStr), err} } var urlBaseStr string @@ -132,17 +139,29 @@ func DeviceByURL(loc *url.URL) (*RootDevice, error) { return root, nil } +func DeviceByURL(loc *url.URL) (*RootDevice, error) { + return DeviceByURLCtx(context.Background(), loc) +} + // CharsetReaderDefault specifies the charset reader used while decoding the output // from a UPnP server. It can be modified in an init function to allow for non-utf8 encodings, // but should not be changed after requesting clients. 
var CharsetReaderDefault func(charset string, input io.Reader) (io.Reader, error) -func requestXml(url string, defaultSpace string, doc interface{}) error { - timeout := time.Duration(3 * time.Second) - client := http.Client{ - Timeout: timeout, +// HTTPClient specifies the http.Client object used when fetching the XML from the UPnP server. +// HTTPClient defaults the http.DefaultClient. This may be overridden by the importing application. +var HTTPClientDefault = http.DefaultClient + +func requestXml(ctx context.Context, url string, defaultSpace string, doc interface{}) error { + ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return err } - resp, err := client.Get(url) + + resp, err := HTTPClientDefault.Do(req) if err != nil { return err } diff --git a/vendor/github.com/huin/goupnp/httpu/serve.go b/vendor/github.com/huin/goupnp/httpu/serve.go index 9f67af85..bac3296f 100644 --- a/vendor/github.com/huin/goupnp/httpu/serve.go +++ b/vendor/github.com/huin/goupnp/httpu/serve.go @@ -7,6 +7,7 @@ import ( "net" "net/http" "regexp" + "sync" ) const ( @@ -73,20 +74,25 @@ func (srv *Server) Serve(l net.PacketConn) error { if srv.MaxMessageBytes != 0 { maxMessageBytes = srv.MaxMessageBytes } + + bufPool := &sync.Pool{ + New: func() interface{} { + return make([]byte, maxMessageBytes) + }, + } for { - buf := make([]byte, maxMessageBytes) + buf := bufPool.Get().([]byte) n, peerAddr, err := l.ReadFrom(buf) if err != nil { return err } - buf = buf[:n] - - go func(buf []byte, peerAddr net.Addr) { + go func() { + defer bufPool.Put(buf) // At least one router's UPnP implementation has added a trailing space // after "HTTP/1.1" - trim it. 
- buf = trailingWhitespaceRx.ReplaceAllLiteral(buf, crlf) + reqBuf := trailingWhitespaceRx.ReplaceAllLiteral(buf[:n], crlf) - req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(buf))) + req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(reqBuf))) if err != nil { log.Printf("httpu: Failed to parse request: %v", err) return @@ -94,7 +100,7 @@ func (srv *Server) Serve(l net.PacketConn) error { req.RemoteAddr = peerAddr.String() srv.Handler.ServeMessage(req) // No need to call req.Body.Close - underlying reader is bytes.Buffer. - }(buf, peerAddr) + }() } } diff --git a/vendor/github.com/huin/goupnp/service_client.go b/vendor/github.com/huin/goupnp/service_client.go index 79a375d5..cb65c19e 100644 --- a/vendor/github.com/huin/goupnp/service_client.go +++ b/vendor/github.com/huin/goupnp/service_client.go @@ -1,6 +1,7 @@ package goupnp import ( + "context" "fmt" "net" "net/url" @@ -21,12 +22,12 @@ type ServiceClient struct { localAddr net.IP } -// NewServiceClients discovers services, and returns clients for them. err will +// NewServiceClientsCtx discovers services, and returns clients for them. err will // report any error with the discovery process (blocking any device/service // discovery), errors reports errors on a per-root-device basis. 
-func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []error, err error) { +func NewServiceClientsCtx(ctx context.Context, searchTarget string) (clients []ServiceClient, errors []error, err error) { var maybeRootDevices []MaybeRootDevice - if maybeRootDevices, err = DiscoverDevices(searchTarget); err != nil { + if maybeRootDevices, err = DiscoverDevicesCtx(ctx, searchTarget); err != nil { return } @@ -49,16 +50,28 @@ func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []e return } -// NewServiceClientsByURL creates client(s) for the given service URN, for a +// NewServiceClients is the legacy version of NewServiceClientsCtx, but uses +// context.Background() as the context. +func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []error, err error) { + return NewServiceClientsCtx(context.Background(), searchTarget) +} + +// NewServiceClientsByURLCtx creates client(s) for the given service URN, for a // root device at the given URL. -func NewServiceClientsByURL(loc *url.URL, searchTarget string) ([]ServiceClient, error) { - rootDevice, err := DeviceByURL(loc) +func NewServiceClientsByURLCtx(ctx context.Context, loc *url.URL, searchTarget string) ([]ServiceClient, error) { + rootDevice, err := DeviceByURLCtx(ctx, loc) if err != nil { return nil, err } return NewServiceClientsFromRootDevice(rootDevice, loc, searchTarget) } +// NewServiceClientsByURL is the legacy version of NewServiceClientsByURLCtx, but uses +// context.Background() as the context. +func NewServiceClientsByURL(loc *url.URL, searchTarget string) ([]ServiceClient, error) { + return NewServiceClientsByURLCtx(context.Background(), loc, searchTarget) +} + // NewServiceClientsFromDevice creates client(s) for the given service URN, in // a given root device. The loc parameter is simply assigned to the // Location attribute of the returned ServiceClient(s). 
diff --git a/vendor/github.com/huin/goupnp/soap/types.go b/vendor/github.com/huin/goupnp/soap/types.go index 3e73d99d..b54b2168 100644 --- a/vendor/github.com/huin/goupnp/soap/types.go +++ b/vendor/github.com/huin/goupnp/soap/types.go @@ -526,3 +526,53 @@ func MarshalURI(v *url.URL) (string, error) { func UnmarshalURI(s string) (*url.URL, error) { return url.Parse(s) } + +// TypeData provides metadata about for marshalling and unmarshalling a SOAP +// type. +type TypeData struct { + funcSuffix string + goType string +} + +// GoTypeName returns the name of the Go type. +func (td TypeData) GoTypeName() string { + return td.goType +} + +// MarshalFunc returns the name of the function that marshals the type. +func (td TypeData) MarshalFunc() string { + return fmt.Sprintf("Marshal%s", td.funcSuffix) +} + +// UnmarshalFunc returns the name of the function that unmarshals the type. +func (td TypeData) UnmarshalFunc() string { + return fmt.Sprintf("Unmarshal%s", td.funcSuffix) +} + +// TypeDataMap maps from a SOAP type (e.g "fixed.14.4") to its type data. +var TypeDataMap = map[string]TypeData{ + "ui1": {"Ui1", "uint8"}, + "ui2": {"Ui2", "uint16"}, + "ui4": {"Ui4", "uint32"}, + "ui8": {"Ui8", "uint64"}, + "i1": {"I1", "int8"}, + "i2": {"I2", "int16"}, + "i4": {"I4", "int32"}, + "int": {"Int", "int64"}, + "r4": {"R4", "float32"}, + "r8": {"R8", "float64"}, + "number": {"R8", "float64"}, // Alias for r8. 
+ "fixed.14.4": {"Fixed14_4", "float64"}, + "float": {"R8", "float64"}, + "char": {"Char", "rune"}, + "string": {"String", "string"}, + "date": {"Date", "time.Time"}, + "dateTime": {"DateTime", "time.Time"}, + "dateTime.tz": {"DateTimeTz", "time.Time"}, + "time": {"TimeOfDay", "soap.TimeOfDay"}, + "time.tz": {"TimeOfDayTz", "soap.TimeOfDay"}, + "boolean": {"Boolean", "bool"}, + "bin.base64": {"BinBase64", "[]byte"}, + "bin.hex": {"BinHex", "[]byte"}, + "uri": {"URI", "*url.URL"}, +} diff --git a/vendor/github.com/huin/goupnp/ssdp/ssdp.go b/vendor/github.com/huin/goupnp/ssdp/ssdp.go index 85e106cb..240dfa73 100644 --- a/vendor/github.com/huin/goupnp/ssdp/ssdp.go +++ b/vendor/github.com/huin/goupnp/ssdp/ssdp.go @@ -1,6 +1,7 @@ package ssdp import ( + "context" "errors" "log" "net/http" @@ -34,14 +35,15 @@ type HTTPUClient interface { ) ([]*http.Response, error) } -// SSDPRawSearch performs a fairly raw SSDP search request, and returns the +// SSDPRawSearchCtx performs a fairly raw SSDP search request, and returns the // unique response(s) that it receives. Each response has the requested // searchTarget, a USN, and a valid location. maxWaitSeconds states how long to // wait for responses in seconds, and must be a minimum of 1 (the // implementation waits an additional 100ms for responses to arrive), 2 is a // reasonable value for this. numSends is the number of requests to send - 3 is // a reasonable value for this. -func SSDPRawSearch( +func SSDPRawSearchCtx( + ctx context.Context, httpu HTTPUClient, searchTarget string, maxWaitSeconds int, @@ -51,7 +53,7 @@ func SSDPRawSearch( return nil, errors.New("ssdp: maxWaitSeconds must be >= 1") } - req := http.Request{ + req := (&http.Request{ Method: methodSearch, // TODO: Support both IPv4 and IPv6. 
Host: ssdpUDP4Addr, @@ -64,8 +66,8 @@ func SSDPRawSearch( "MAN": []string{ssdpDiscover}, "ST": []string{searchTarget}, }, - } - allResponses, err := httpu.Do(&req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends) + }).WithContext(ctx) + allResponses, err := httpu.Do(req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends) if err != nil { return nil, err } @@ -97,3 +99,9 @@ func SSDPRawSearch( return responses, nil } + +// SSDPRawSearch is the legacy version of SSDPRawSearchCtx, but uses +// context.Background() as the context. +func SSDPRawSearch(httpu HTTPUClient, searchTarget string, maxWaitSeconds int, numSends int) ([]*http.Response, error) { + return SSDPRawSearchCtx(context.Background(), httpu, searchTarget, maxWaitSeconds, numSends) +} diff --git a/vendor/github.com/huin/goupnp/workspace.code-workspace b/vendor/github.com/huin/goupnp/workspace.code-workspace new file mode 100644 index 00000000..7d337cad --- /dev/null +++ b/vendor/github.com/huin/goupnp/workspace.code-workspace @@ -0,0 +1,11 @@ +{ + "folders": [ + { + "path": "." + }, + { + "path": "v2alpha" + } + ], + "settings": {} +} diff --git a/vendor/github.com/ipfs/go-cid/README.md b/vendor/github.com/ipfs/go-cid/README.md index 89da0412..70c3e5c7 100644 --- a/vendor/github.com/ipfs/go-cid/README.md +++ b/vendor/github.com/ipfs/go-cid/README.md @@ -69,7 +69,7 @@ import ( // Create a cid manually by specifying the 'prefix' parameters pref := cid.Prefix{ Version: 1, - Codec: mc.Raw, + Codec: uint64(mc.Raw), MhType: mh.SHA2_256, MhLength: -1, // default length } diff --git a/vendor/github.com/ipfs/go-cid/cid.go b/vendor/github.com/ipfs/go-cid/cid.go index 651c94df..f1824248 100644 --- a/vendor/github.com/ipfs/go-cid/cid.go +++ b/vendor/github.com/ipfs/go-cid/cid.go @@ -37,10 +37,32 @@ import ( // UnsupportedVersionString just holds an error message const UnsupportedVersionString = "" +// ErrInvalidCid is an error that indicates that a CID is invalid. 
+type ErrInvalidCid struct { + Err error +} + +func (e ErrInvalidCid) Error() string { + return fmt.Sprintf("invalid cid: %s", e.Err) +} + +func (e ErrInvalidCid) Unwrap() error { + return e.Err +} + +func (e ErrInvalidCid) Is(err error) bool { + switch err.(type) { + case ErrInvalidCid, *ErrInvalidCid: + return true + default: + return false + } +} + var ( // ErrCidTooShort means that the cid passed to decode was not long // enough to be a valid Cid - ErrCidTooShort = errors.New("cid too short") + ErrCidTooShort = ErrInvalidCid{errors.New("cid too short")} // ErrInvalidEncoding means that selected encoding is not supported // by this Cid version @@ -90,10 +112,10 @@ func tryNewCidV0(mhash mh.Multihash) (Cid, error) { // incorrectly detect it as CidV1 in the Version() method dec, err := mh.Decode(mhash) if err != nil { - return Undef, err + return Undef, ErrInvalidCid{err} } if dec.Code != mh.SHA2_256 || dec.Length != 32 { - return Undef, fmt.Errorf("invalid hash for cidv0 %d-%d", dec.Code, dec.Length) + return Undef, ErrInvalidCid{fmt.Errorf("invalid hash for cidv0 %d-%d", dec.Code, dec.Length)} } return Cid{string(mhash)}, nil } @@ -177,7 +199,7 @@ func Parse(v interface{}) (Cid, error) { case Cid: return v2, nil default: - return Undef, fmt.Errorf("can't parse %+v as Cid", v2) + return Undef, ErrInvalidCid{fmt.Errorf("can't parse %+v as Cid", v2)} } } @@ -210,7 +232,7 @@ func Decode(v string) (Cid, error) { if len(v) == 46 && v[:2] == "Qm" { hash, err := mh.FromB58String(v) if err != nil { - return Undef, err + return Undef, ErrInvalidCid{err} } return tryNewCidV0(hash) @@ -218,7 +240,7 @@ func Decode(v string) (Cid, error) { _, data, err := mbase.Decode(v) if err != nil { - return Undef, err + return Undef, ErrInvalidCid{err} } return Cast(data) @@ -240,7 +262,7 @@ func ExtractEncoding(v string) (mbase.Encoding, error) { // check encoding is valid _, err := mbase.NewEncoder(encoding) if err != nil { - return -1, err + return -1, ErrInvalidCid{err} } return 
encoding, nil @@ -260,11 +282,11 @@ func ExtractEncoding(v string) (mbase.Encoding, error) { func Cast(data []byte) (Cid, error) { nr, c, err := CidFromBytes(data) if err != nil { - return Undef, err + return Undef, ErrInvalidCid{err} } if nr != len(data) { - return Undef, fmt.Errorf("trailing bytes in data buffer passed to cid Cast") + return Undef, ErrInvalidCid{fmt.Errorf("trailing bytes in data buffer passed to cid Cast")} } return c, nil @@ -434,7 +456,7 @@ func (c Cid) Equals(o Cid) bool { // UnmarshalJSON parses the JSON representation of a Cid. func (c *Cid) UnmarshalJSON(b []byte) error { if len(b) < 2 { - return fmt.Errorf("invalid cid json blob") + return ErrInvalidCid{fmt.Errorf("invalid cid json blob")} } obj := struct { CidTarget string `json:"/"` @@ -442,7 +464,7 @@ func (c *Cid) UnmarshalJSON(b []byte) error { objptr := &obj err := json.Unmarshal(b, &objptr) if err != nil { - return err + return ErrInvalidCid{err} } if objptr == nil { *c = Cid{} @@ -450,12 +472,12 @@ func (c *Cid) UnmarshalJSON(b []byte) error { } if obj.CidTarget == "" { - return fmt.Errorf("cid was incorrectly formatted") + return ErrInvalidCid{fmt.Errorf("cid was incorrectly formatted")} } out, err := Decode(obj.CidTarget) if err != nil { - return err + return ErrInvalidCid{err} } *c = out @@ -542,12 +564,12 @@ func (p Prefix) Sum(data []byte) (Cid, error) { if p.Version == 0 && (p.MhType != mh.SHA2_256 || (p.MhLength != 32 && p.MhLength != -1)) { - return Undef, fmt.Errorf("invalid v0 prefix") + return Undef, ErrInvalidCid{fmt.Errorf("invalid v0 prefix")} } hash, err := mh.Sum(data, p.MhType, length) if err != nil { - return Undef, err + return Undef, ErrInvalidCid{err} } switch p.Version { @@ -556,7 +578,7 @@ func (p Prefix) Sum(data []byte) (Cid, error) { case 1: return NewCidV1(p.Codec, hash), nil default: - return Undef, fmt.Errorf("invalid cid version") + return Undef, ErrInvalidCid{fmt.Errorf("invalid cid version")} } } @@ -586,22 +608,22 @@ func PrefixFromBytes(buf 
[]byte) (Prefix, error) { r := bytes.NewReader(buf) vers, err := varint.ReadUvarint(r) if err != nil { - return Prefix{}, err + return Prefix{}, ErrInvalidCid{err} } codec, err := varint.ReadUvarint(r) if err != nil { - return Prefix{}, err + return Prefix{}, ErrInvalidCid{err} } mhtype, err := varint.ReadUvarint(r) if err != nil { - return Prefix{}, err + return Prefix{}, ErrInvalidCid{err} } mhlen, err := varint.ReadUvarint(r) if err != nil { - return Prefix{}, err + return Prefix{}, ErrInvalidCid{err} } return Prefix{ @@ -615,12 +637,12 @@ func PrefixFromBytes(buf []byte) (Prefix, error) { func CidFromBytes(data []byte) (int, Cid, error) { if len(data) > 2 && data[0] == mh.SHA2_256 && data[1] == 32 { if len(data) < 34 { - return 0, Undef, fmt.Errorf("not enough bytes for cid v0") + return 0, Undef, ErrInvalidCid{fmt.Errorf("not enough bytes for cid v0")} } h, err := mh.Cast(data[:34]) if err != nil { - return 0, Undef, err + return 0, Undef, ErrInvalidCid{err} } return 34, Cid{string(h)}, nil @@ -628,21 +650,21 @@ func CidFromBytes(data []byte) (int, Cid, error) { vers, n, err := varint.FromUvarint(data) if err != nil { - return 0, Undef, err + return 0, Undef, ErrInvalidCid{err} } if vers != 1 { - return 0, Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers) + return 0, Undef, ErrInvalidCid{fmt.Errorf("expected 1 as the cid version number, got: %d", vers)} } _, cn, err := varint.FromUvarint(data[n:]) if err != nil { - return 0, Undef, err + return 0, Undef, ErrInvalidCid{err} } mhnr, _, err := mh.MHFromBytes(data[n+cn:]) if err != nil { - return 0, Undef, err + return 0, Undef, ErrInvalidCid{err} } l := n + cn + mhnr @@ -695,6 +717,9 @@ func (r *bufByteReader) ReadByte() (byte, error) { // It's recommended to supply a reader that buffers and implements io.ByteReader, // as CidFromReader has to do many single-byte reads to decode varints. // If the argument only implements io.Reader, single-byte Read calls are used instead. 
+// +// If the Reader is found to yield zero bytes, an io.EOF error is returned directly, in all +// other error cases, an ErrInvalidCid, wrapping the original error, is returned. func CidFromReader(r io.Reader) (int, Cid, error) { // 64 bytes is enough for any CIDv0, // and it's enough for most CIDv1s in practice. @@ -705,32 +730,37 @@ func CidFromReader(r io.Reader) (int, Cid, error) { // The varint package wants a io.ByteReader, so we must wrap our io.Reader. vers, err := varint.ReadUvarint(br) if err != nil { - return len(br.dst), Undef, err + if err == io.EOF { + // First-byte read in ReadUvarint errors with io.EOF, so reader has no data. + // Subsequent reads with an EOF will return io.ErrUnexpectedEOF and be wrapped here. + return 0, Undef, err + } + return len(br.dst), Undef, ErrInvalidCid{err} } // If we have a CIDv0, read the rest of the bytes and cast the buffer. if vers == mh.SHA2_256 { if n, err := io.ReadFull(r, br.dst[1:34]); err != nil { - return len(br.dst) + n, Undef, err + return len(br.dst) + n, Undef, ErrInvalidCid{err} } br.dst = br.dst[:34] h, err := mh.Cast(br.dst) if err != nil { - return len(br.dst), Undef, err + return len(br.dst), Undef, ErrInvalidCid{err} } return len(br.dst), Cid{string(h)}, nil } if vers != 1 { - return len(br.dst), Undef, fmt.Errorf("expected 1 as the cid version number, got: %d", vers) + return len(br.dst), Undef, ErrInvalidCid{fmt.Errorf("expected 1 as the cid version number, got: %d", vers)} } // CID block encoding multicodec. _, err = varint.ReadUvarint(br) if err != nil { - return len(br.dst), Undef, err + return len(br.dst), Undef, ErrInvalidCid{err} } // We could replace most of the code below with go-multihash's ReadMultihash. @@ -741,19 +771,19 @@ func CidFromReader(r io.Reader) (int, Cid, error) { // Multihash hash function code. _, err = varint.ReadUvarint(br) if err != nil { - return len(br.dst), Undef, err + return len(br.dst), Undef, ErrInvalidCid{err} } // Multihash digest length. 
mhl, err := varint.ReadUvarint(br) if err != nil { - return len(br.dst), Undef, err + return len(br.dst), Undef, ErrInvalidCid{err} } // Refuse to make large allocations to prevent OOMs due to bugs. const maxDigestAlloc = 32 << 20 // 32MiB if mhl > maxDigestAlloc { - return len(br.dst), Undef, fmt.Errorf("refusing to allocate %d bytes for a digest", mhl) + return len(br.dst), Undef, ErrInvalidCid{fmt.Errorf("refusing to allocate %d bytes for a digest", mhl)} } // Fine to convert mhl to int, given maxDigestAlloc. @@ -772,7 +802,7 @@ func CidFromReader(r io.Reader) (int, Cid, error) { if n, err := io.ReadFull(r, br.dst[prefixLength:cidLength]); err != nil { // We can't use len(br.dst) here, // as we've only read n bytes past prefixLength. - return prefixLength + n, Undef, err + return prefixLength + n, Undef, ErrInvalidCid{err} } // This simply ensures the multihash is valid. @@ -780,7 +810,7 @@ func CidFromReader(r io.Reader) (int, Cid, error) { // for now, it helps ensure consistency with CidFromBytes. 
_, _, err = mh.MHFromBytes(br.dst[mhStart:]) if err != nil { - return len(br.dst), Undef, err + return len(br.dst), Undef, ErrInvalidCid{err} } return len(br.dst), Cid{string(br.dst)}, nil diff --git a/vendor/github.com/ipfs/go-cid/version.json b/vendor/github.com/ipfs/go-cid/version.json index 908483a3..26a7d478 100644 --- a/vendor/github.com/ipfs/go-cid/version.json +++ b/vendor/github.com/ipfs/go-cid/version.json @@ -1,3 +1,3 @@ { - "version": "v0.3.2" + "version": "v0.4.1" } diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a2bf06e9..7a008a4d 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.7.2 + - go install mvdan.cc/garble@v0.9.3 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index d73fb86e..55c8ca44 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,34 @@ This package provides various compression algorithms. # changelog +* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) + * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 + * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). 
https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 + * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 + * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 + * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 + +* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) + * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 + * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 + * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 + * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 + * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 + * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 + +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + * Dec 11, 2022 (v1.15.13) * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 
https://github.com/klauspost/compress/pull/708 @@ -587,6 +615,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. +* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. +* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. # license diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 00000000..82882961 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,989 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
+ + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 28 + + skipNever = math.MaxInt32 + + debugDeflate = false +) + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-6 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, +} + +// advancedState contains state for the advanced levels, with bigger hash tables, etc. +type advancedState struct { + // deflate state + length int + offset int + maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. 
+ + // input window: unprocessed data is window[index:windowEnd] + index int + estBitsPerByte int + hashMatch [maxMatchLength + minMatchLength]uint32 + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 +} + +type compressor struct { + compressionLevel + + h *huffmanEncoder + w *huffmanBitWriter + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + + window []byte + windowEnd int + blockStart int // window index where current tokens start + err error + + // queued output tokens + tokens tokens + fast fastEnc + state *advancedState + + sync bool // requesting flush + byteAvailable bool // if true, still need to process window[index-1]. +} + +func (d *compressor) fillDeflate(b []byte) int { + s := d.state + if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) + s.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + s.hashOffset += windowSize + if s.hashOffset > maxHashOffset { + delta := s.hashOffset - 1 + s.hashOffset -= delta + s.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). 
+ for i, v := range s.hashPrev[:] { + if int(v) > delta { + s.hashPrev[i] = uint32(int(v) - delta) + } else { + s.hashPrev[i] = 0 + } + } + for i, v := range s.hashHead[:] { + if int(v) > delta { + s.hashHead[i] = uint32(int(v) - delta) + } else { + s.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window, d.sync) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok, eof, window, d.sync) + } + } else { + d.w.writeBlock(tok, eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only or huffman mode. 
+ if d.level <= 0 { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() + return + } + s := d.state + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. + s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + // Update window information. + d.windowEnd += n + s.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. 
+ tries := d.chain + length = minMatchLength - 1 + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + + // Minimum gain to accept a match. + cGain := 4 + + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 3 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + // Calculate gain. Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. 
+ break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return hash4u(binary.LittleEndian.Uint32(b), hashBits) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. +func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < 4 { + return + } + hb := binary.LittleEndian.Uint32(b) + + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 + for i := 1; i < end; i++ { + hb = (hb >> 8) | uint32(b[i+3])<<24 + dst[i] = hash4u(hb, hashBits) + } +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.byteAvailable = false + d.err = nil + if d.state == nil { + return + } + s := d.state + s.index = 0 + s.hashOffset = 1 + s.length = minMatchLength - 1 + s.offset = 0 + s.chainHead = -1 +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + s := d.state + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = debugDeflate + + if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { + return + } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. 
+ if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } + + s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + + for { + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - s.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + return + } + } + if s.index < s.maxInsertIndex { + // Update the hash + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[hash] = uint32(s.index + s.hashOffset) + } + prevLength := s.length + prevOffset := s.offset + s.length = minMatchLength - 1 + s.offset = 0 + minIndex := s.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { + s.length = newLength + s.offset = newOffset + } + } + + if prevLength >= minMatchLength && s.length <= prevLength { + // No better match, but check for better match at end... + // + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. 
+ const checkOff = 2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... 
+ for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } + } + } + } + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + newIndex := s.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > s.maxInsertIndex { + end = s.maxInsertIndex + } + end += minMatchLength - 1 + startindex := s.index + 1 + if startindex > s.maxInsertIndex { + startindex = s.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. 
+ s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + + s.index = newIndex + d.byteAvailable = false + s.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.ii = 0 + } else { + // Reset, if we got a match this run. + if s.length >= minMatchLength { + s.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + s.ii++ + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when s.ii overflows after 64KB. + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) + for j := 0; j < n; j++ { + if s.index >= d.windowEnd-1 { + break + } + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + // Index... 
+ if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + s.index++ + } + // Flush last byte + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + } + } else { + s.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) store() { + if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.windowEnd = 0 + } +} + +// fillWindow will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillBlock(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeFast will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeFast() { + // We only compress if we have maxStoreBlockSize. + if d.windowEnd < len(d.window) { + if !d.sync { + return + } + // Handle extremely small sizes. 
+ if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 + d.fast.Reset() + return + } + } + + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if d.tokens.n == 0 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. + } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.w.logNewTablePenalty = 10 + d.window = make([]byte, 32<<10) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level == DefaultCompression: + level = 5 + fallthrough + case level >= 1 && level <= 6: + 
d.w.logNewTablePenalty = 7 + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logNewTablePenalty = 8 + d.state = &advancedState{} + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + d.step = (*compressor).deflateLazy + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + d.level = level + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.fast != nil { + d.fast.Reset() + d.windowEnd = 0 + d.tokens.Reset() + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. + d.windowEnd = 0 + default: + s := d.state + s.chainHead = -1 + for i := range s.hashHead { + s.hashHead[i] = 0 + } + for i := range s.hashPrev { + s.hashPrev[i] = 0 + } + s.hashOffset = 1 + s.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.Reset() + s.length = minMatchLength - 1 + s.offset = 0 + s.ii = 0 + s.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + d.w.reset(nil) + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. 
+// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + zw, err := NewWriter(w, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. 
+// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if len(w.dict) > 0 { + // w was created with NewWriterDict + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 00000000..bb36351a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. 
+// +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. 
+func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. 
This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. 
+func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go new file mode 100644 index 00000000..24caf5f7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -0,0 +1,216 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "math/bits" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. 
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. +) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type tableEntry struct { + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) +} + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. 
+func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. 
+func (e *fastGen) Reset() { + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. + if e.cur <= bufferReset { + e.cur += maxMatchOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +// matchLen returns the maximum length. +// 'a' must be the shortest of the two. +func matchLen(a, b []byte) int { + var checked int + + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 00000000..89a5dd89 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,1187 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. 
+ bufferFlushSize = 246 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = [32]uint8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = [32]uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + +// offset code word extra bits. +var offsetExtraBits = [32]int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, +} + +var offsetCombined = [32]uint32{} + +func init() { + var offsetBase = [32]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) + } +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. 
+ // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint8 + nbytes uint8 + lastHuffMan bool + literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logNewTablePenalty uint + bytes [256 + 8]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 +} + +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. +// This also indicates that a Huffman table has been generated that can output all +// possible symbols. +// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated +// an EOB with the previous table must be written. +// +// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. +// +// An incoming block estimates the output size of a new table using a 'fresh' by calculating the +// optimal size and adding a penalty in 'logNewTablePenalty'. +// A Huffman table is not optimal, which is why we add a penalty, and generating a new table +// is slower both for compression and decompression. 
+ +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalEncoding: newHuffmanEncoder(literalCount), + tmpLitEncoding: newHuffmanEncoder(literalCount), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.lastHeader = 0 + w.lastHuffMan = false +} + +func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { + a := t.offHist[:offsetCodeCount] + b := w.offsetEncoding.codes + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.extraHist[:literalCount-256] + b = w.literalEncoding.codes[256:literalCount] + b = b[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + + a = t.litHist[:256] + b = w.literalEncoding.codes[:len(a)] + for i, v := range a { + if v != 0 && b[i].zero() { + return false + } + } + return true +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { + w.bits |= uint64(b) << (w.nbits & 63) + w.nbits += nb + if w.nbits >= 48 { + w.writeOutBits() + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = 
byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen[:] // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = litEnc.codes[i].len() + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = offEnc.codes[i].len() + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. 
+ if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +func (w *huffmanBitWriter) codegens() int { + numCodegens := len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return numCodegens +} + +func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + return 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7, numCodegens +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { + size = litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + return size +} + +// dynamicSize returns the size of dynamically encoded data in bits. 
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() + size = header + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + + extraBits + return size, numCodegens +} + +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + + extraBits +} + +// storedSize calculates the stored size, including header. +// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() + if w.nbits >= 48 { + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + + // We over-write, but faster... + binary.LittleEndian.PutUint64(w.bytes[n:], bits) + n += 6 + + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + + w.nbytes = n +} + +// Write the header of a dynamic Huffman block to the output stream. 
+// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord = uint32(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[codeWord]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + } + } +} + +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. 
+ if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate() + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + extraBits = w.extraBitSize() + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. 
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + sync = sync || eof + if sync { + tokens.AddEOB() + } + + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... 
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } + + var size int + + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table. + // Use the previous header size as the best estimate. + newSize := w.lastHeader + tokens.EstimatedBits() + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + // Check if we get a reasonable size decrease. 
+ if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + + // We want a new block/table + if w.lastHeader == 0 { + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + + var numCodegens int + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + if !sync { + w.lastHeader, _ = w.headerSize() + } + w.lastHuffMan = false + } + + if sync { + w.lastHeader = 0 + } + // Write the tokens. 
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) +} + +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. +func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist + + if t.n == 0 { + return + } + if filled { + return maxNumLit, maxNumDist + } + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + return +} + +func (w *huffmanBitWriter) generate() { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + if len(tokens) == 0 { + return + } + + // Only last token should be endBlockMarker. 
+ var deferEOB bool + if tokens[len(tokens)-1] == endBlockMarker { + tokens = tokens[:len(tokens)-1] + deferEOB = true + } + + // Create slices up to the next power of two to avoid bounds checks. + lits := leCodes[:256] + offs := oeCodes[:32] + lengths := leCodes[lengthCodesStart:] + lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + for _, t := range tokens { + if t < 256 { + //w.writeCode(lits[t.literal()]) + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + continue + } + + // Write the length + length := t.length() + lengthCode := lengthCode(length) & 31 + if false { + w.writeCode(lengths[lengthCode]) + } else { + // inlined + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] + //w.writeBits(extraLength, extraLengthBits) + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = 
w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + // Write the offset + offset := t.offset() + offsetCode := (offset >> 16) & 31 + if false { + w.writeCode(offs[offsetCode]) + } else { + // inlined + c := offs[offsetCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if deferEOB { + w.writeCode(leCodes[endBlockMarker]) + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. 
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq[:] { + w.literalFreq[i] = 0 + } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + // Add everything as literals + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty + } + + // Store bytes, if we don't get a reasonable improvement. 
+ if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + if w.lastHeader > 0 { + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) + + if estBits < reuseSize { + if debugDeflate { + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") + } + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } + } + + count := 0 + if w.lastHeader == 0 { + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } + } + + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. 
+ if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() + } + + if eof || sync { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 00000000..be7b58b4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,417 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "math" + "math/bits" +) + +const ( + maxBitsLimit = 16 + // number of valid literals + literalCount = 286 +) + +// hcode is a huffman code with a bit code and bit length. +type hcode uint32 + +func (h hcode) len() uint8 { + return uint8(h) +} + +func (h hcode) code64() uint64 { + return uint64(h >> 8) +} + +func (h hcode) zero() bool { + return h == 0 +} + +type huffmanEncoder struct { + codes []hcode + bitCount [17]int32 + + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. + // The largest of these is literalCount, so we allocate for that case. + freqcache [literalCount + 1]literalNode +} + +type literalNode struct { + literal uint16 + freq uint16 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +// set sets the code and length of an hcode. 
+func (h *hcode) set(code uint16, length uint8) { + *h = hcode(length) | (hcode(code) << 8) +} + +func newhcode(code uint16, length uint8) hcode { + return hcode(length) | (hcode(code) << 8) +} + +func reverseBits(number uint16, bitLength byte) uint16 { + return bits.Reverse16(number << ((16 - bitLength) & 15)) +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} } + +func newHuffmanEncoder(size int) *huffmanEncoder { + // Make capacity to next power of two. + c := uint(bits.Len32(uint32(size - 1))) + return &huffmanEncoder{codes: make([]hcode, size, 1<= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// +// maxBits The maximum number of bits that should be used to encode any literal. +// +// Must be less than 16. +// +// return An integer array in which array[i] indicates the number of literals +// +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. 
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := uint32(maxBits) + for level < 16 { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. 
+ if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). 
+ chunk := list[len(list)-int(bits):] + + sortByLiteral(chunk) + for _, node := range chunk { + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { + list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + codes[i] = 0 + } + } + list[count] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. + h.codes[node.literal].set(uint16(i), 1) + } + return + } + sortByFreq(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +// atLeastOne clamps the result between 1 and 15. +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 + } + if v > 15 { + return 15 + } + return v +} + +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ + } + } +} + +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. 
+ h = h[:256] + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] + } + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go new file mode 100644 index 00000000..20778029 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -0,0 +1,178 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). 
+ if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +// siftDownByFreq implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDownByFreq(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { + child++ + } + if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. 
+ s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. 
+ +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go new file mode 100644 index 00000000..93f1aea1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). 
+ if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. 
+ s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. + protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + 
data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 00000000..414c0bea --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,793 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. 
+package flate + +import ( + "bufio" + "compress/flate" + "fmt" + "io" + "math/bits" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false +) + +// Value of length - 3 and extra bits. +type lengthExtra struct { + length, extra uint8 +} + +var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// Initialize the fixedHuffmanDecoder only once upon first use. 
+var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError = flate.CorruptInputError + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError = flate.ReadError + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError = flate.WriteError + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. 
+// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + maxRead int // the maximum number of bits we can read and not overread + chunks *[huffmanNumChunks]uint16 // chunks as described above + links [][]uint16 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(lengths []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.chunks == nil { + h.chunks = &[huffmanNumChunks]uint16{} + } + if h.maxRead != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute maxRead and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range lengths { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. 
Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. + if code != 1< huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + if cap(h.links) < huffmanNumChunks-link { + h.links = make([][]uint16, huffmanNumChunks-link) + } else { + h.links = h.links[:huffmanNumChunks-link] + } + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(bits.Reverse16(uint16(j))) + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint16(off<>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. + if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. 
+ panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. + if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// The actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + dict dictDecoder + + // Next step in the decompression, + // and decompression state. + step func(*decompressor) + stepState int + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Input bits, in top of b. 
+ b uint32 + + nb uint + final bool +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("predefinied huffman block") + } + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("dynamic huffman block") + } + default: + // 3 is reserved. + if debugDecode { + fmt.Println("reserved data block encountered") + } + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// Support the io.WriteTo interface for io.Copy and friends. 
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.step(f) + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. 
+ for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. + var rep int + var nb uint + var b int + switch x { + default: + return InternalError("unexpected length code") + case 16: + rep = 3 + nb = 2 + if i == 0 { + if debugDecode { + fmt.Println("i==0") + } + return CorruptInputError(f.roffset) + } + b = f.bits[i-1] + case 17: + rep = 3 + nb = 3 + b = 0 + case 18: + rep = 11 + nb = 7 + b = 0 + } + for f.nb < nb { + if err := f.moreBits(); err != nil { + if debugDecode { + fmt.Println("morebits:", err) + } + return err + } + } + rep += int(f.b & uint32(1<<(nb®SizeMaskUint32)-1)) + f.b >>= nb & regSizeMaskUint32 + f.nb -= nb + if i+rep > n { + if debugDecode { + fmt.Println("i+rep > n", i, rep, n) + } + return CorruptInputError(f.roffset) + } + for j := 0; j < rep; j++ { + f.bits[i] = b + i++ + } + } + + if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + if debugDecode { + fmt.Println("init2 failed") + } + return CorruptInputError(f.roffset) + } + + // As an optimization, we can initialize the maxRead bits to read at a time + // for the HLIT tree to the length of the EOB marker since we know that + // every block must terminate with one. This preserves the property that + // we never read any extra bytes after the end of the DEFLATE stream. 
+ if f.h1.maxRead < f.bits[endBlockMarker] { + f.h1.maxRead = f.bits[endBlockMarker] + } + if !f.final { + // If not the final block, the smallest block possible is + // a predefined table, BTYPE=01, with a single EOB marker. + // This will take up 3 + 7 bits. + f.h1.maxRead += 10 + } + + return nil +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + left := (f.nb) & 7 + f.nb -= left + f.b >>= left + + offBytes := f.nb >> 3 + // Unfilled values will be overwritten. + f.buf[0] = uint8(f.b) + f.buf[1] = uint8(f.b >> 8) + f.buf[2] = uint8(f.b >> 16) + f.buf[3] = uint8(f.b >> 24) + + f.roffset += int64(offBytes) + f.nb, f.b = 0, 0 + + // Length then ones-complement of length. + nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) + f.roffset += int64(nr) + if err != nil { + f.err = noEOF(err) + return + } + n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 + nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 + if nn != ^n { + if debugDecode { + ncomp := ^n + fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) + } + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = int(n) + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. 
+func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + f.err = noEOF(err) + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = (*decompressor).nextBlock +} + +// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. +func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. + var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. 
+func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go new file mode 100644 index 00000000..61342b6b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -0,0 +1,1283 @@ +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + + // Optimization. 
Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := 
uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. 
+ for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. + if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. 
+ { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBytesReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. 
+ if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBytesReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBufioReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bufio.Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBufioReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. 
+ if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBufioReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanStringsReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*strings.Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. 
+ if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanGenericReader() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(Reader) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + 
dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << (nb & regSizeMaskUint32) + for fnb < nb { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 + fnb -= nb + dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra + // slower: dist = bitMask32[nb+1] + 2 + extra + default: + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist too big:", dist, maxNumDist) + } + f.err = CorruptInputError(f.roffset) + return + } + + // No check on length; encoding can be prescient. 
+ if dist > uint32(dict.histSize()) { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("dist > dict.histSize():", dist, dict.histSize()) + } + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, int(dist) + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader // We need to continue this work + f.stepState = stateDict + f.b, f.nb = fb, fnb + return + } + goto readLiteral + } + // Not reached +} + +func (f *decompressor) huffmanBlockDecoder() func() { + switch f.r.(type) { + case *bytes.Buffer: + return f.huffmanBytesBuffer + case *bytes.Reader: + return f.huffmanBytesReader + case *bufio.Reader: + return f.huffmanBufioReader + case *strings.Reader: + return f.huffmanStringsReader + case Reader: + return f.huffmanGenericReader + default: + return f.huffmanGenericReader + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go new file mode 100644 index 00000000..703b9a89 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -0,0 +1,241 @@ +package flate + +import ( + "encoding/binary" + "fmt" + "math/bits" +) + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL1 struct { + fastGen + table [tableSize]tableEntry +} + +// EncodeL1 uses a similar algorithm to level 1 +func (e *fastEncL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. 
+ for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + candidate = e.table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, tableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... 
+ cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + cv = now + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + var l = int32(4) + if false { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else { + // inlined: + a := src[s+4:] + b := src[t+4:] + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + l += int32(bits.TrailingZeros64(diff) >> 3) + break + } + l += 8 + a = a[8:] + b = b[8:] + } + if len(a) < 8 { + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + break + } + l++ + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + // Save the match found + if false { + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + } else { + // Inlined... 
+ xoffset := uint32(s - t - baseMatchOffset) + xlength := l + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + dst.extraHist[lengthCodes1[uint8(xl)]]++ + dst.offHist[oc]++ + dst.tokens[dst.n] = token(matchType | uint32(xl)<= s { + s = nextS + 1 + } + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, tableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashLen(x, tableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + cv = x >> 8 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go new file mode 100644 index 00000000..876dfbe3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -0,0 +1,214 @@ +package flate + +import "fmt" + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. 
+ sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, bTableBits, hashBytes) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, bTableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... + cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. 
+ t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. + for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, i) + nextHash := hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
+ x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} + currHash := hashLen(x>>16, bTableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { + cv = x >> 24 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go new file mode 100644 index 00000000..7aa2b72a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -0,0 +1,241 @@ +package flate + +import "fmt" + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [1 << 16]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 7 + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load6432(src, nextS) + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} + + // Check both candidates + candidate = candidates.Cur + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. + continue + } + + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { + break + } + // Both match and are valid, pick longest. 
+ offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t}, + } + } + goto emitRemainder + } + + // Store every 5th hash in-between. 
+ for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} + } + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2}, + } + x >>= 8 + prevHash = hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1}, + } + x >>= 8 + currHash := hashLen(x, tableBits, hashBytes) + candidates := e.table[currHash] + cv = x + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur}, + } + + // Check both candidates + candidate = candidates.Cur + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... + continue + } + } + cv = x >> 8 + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go new file mode 100644 index 00000000..23c08b32 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -0,0 +1,221 @@ +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. 
+ sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. 
+ l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go new file mode 100644 index 00000000..83ef50ba --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -0,0 +1,310 @@ +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. 
+ dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + 
t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... + t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. 
+ const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. 
+ if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. 
+ if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go new file mode 100644 index 00000000..f1e9d98f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -0,0 +1,325 @@ +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. 
+ dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. 
+ t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. 
+ if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end-of-match... + if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] + // Test current + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if off < maxMatchOffset { + if off > 0 && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + // Test next: + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 + if off > 0 && off < maxMatchOffset && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. 
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. + if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go new file mode 100644 index 00000000..6ed28061 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -0,0 +1,37 @@ +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go new file mode 100644 index 00000000..1b7a2cbd --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -0,0 +1,40 @@ +//go:build !amd64 +// +build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go new file mode 100644 index 00000000..f3d4139e --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -0,0 +1,318 @@ +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + // dictionary will be taken from maxStatelessBlock, so limit it. 
+ maxStatelessDict = 8 << 10 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true, nil) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false, nil) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. +// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows compressing directly to a Writer without retaining state. +// When returning everything will be flushed. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. +// Longer dictionaries will be truncated and will still produce valid output. +// Sending nil dictionary is perfectly fine. +func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... 
+ bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. + var inDict []byte + + for len(in) > 0 { + todo := in + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] + } + inOrg := in + in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } + // Compress + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(uncompressed), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) + } else { + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil + dst.Reset() + } + if bw.err != nil { + return bw.err + } + } + if !eof { + // Align, only a stored block can do that. + bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
+ b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte, startAt int16) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src)-int(startAt) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = 0 + return + } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } + + s := startAt + 1 + nextEmit := startAt + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset + l := int16(matchLen(src[s+4:], src[t+4:]) + 4) + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + // Save the match found + dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. 
At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6416(src, s-2) + o := s - 2 + prevHash := hashSL(uint32(x)) + table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashSL(uint32(x)) + candidate = table[currHash] + table[currHash] = tableEntry{offset: o + 2} + + if uint32(x) != load3216(src, candidate.offset) { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go new file mode 100644 index 00000000..d818790c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,379 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits + lengthShift = 22 + offsetMask = 1<maxnumlit + offHist [32]uint16 // offset codes + litHist [256]uint16 // codes 0->255 + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize + tokens [maxStoreBlockSize + 1]token +} + +func (t *tokens) Reset() { + if t.n == 0 { + return + } + t.n = 0 + t.nFilled = 0 + for i := range t.litHist[:] { + t.litHist[i] = 0 + } + for i := range t.extraHist[:] { + t.extraHist[i] = 0 + } + for i := range t.offHist[:] { + t.offHist[i] = 0 + } +} + +func (t *tokens) Fill() { + if t.n == 0 { + return + } + for i, v := range t.litHist[:] { + if v == 0 { + t.litHist[i] = 1 + t.nFilled++ + } + } + for i, v := range t.extraHist[:literalCount-256] { + if v == 0 { + t.nFilled++ + t.extraHist[i] = 1 + } + } + for i, v := range t.offHist[:offsetCodeCount] { + if v == 0 { + t.offHist[i] = 1 + } + } +} + +func indexTokens(in []token) tokens { + var t tokens + t.indexTokens(in) + return t +} + +func (t *tokens) indexTokens(in []token) { + t.Reset() + for _, tok := range in { + if tok < matchType { + t.AddLiteral(tok.literal()) + continue + } + t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) + } +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+func emitLiteral(dst *tokens, lit []byte) { + for _, v := range lit { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } +} + +func (t *tokens) AddLiteral(lit byte) { + t.tokens[t.n] = token(lit) + t.litHist[lit]++ + t.n++ +} + +// from https://stackoverflow.com/a/28730362 +func mFastLog2(val float32) float32 { + ux := int32(math.Float32bits(val)) + log2 := (float32)(((ux >> 23) & 255) - 128) + ux &= -0x7f800001 + ux += 127 << 23 + uval := math.Float32frombits(uint32(ux)) + log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 + return log2 +} + +// EstimatedBits will return an minimum size estimated by an *optimal* +// compression of the block. +// The size of the block +func (t *tokens) EstimatedBits() int { + shannon := float32(0) + bits := int(0) + nMatches := 0 + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) + for _, v := range t.litHist[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } + } + // Just add 15 for EOB + shannon += 15 + for i, v := range t.extraHist[1 : literalCount-256] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(lengthExtraBits[i&31]) * int(v) + nMatches += int(v) + } + } + } + if nMatches > 0 { + invTotal := 1.0 / float32(nMatches) + for i, v := range t.offHist[:offsetCodeCount] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(offsetExtraBits[i&31]) * int(v) + } + } + } + return int(shannon) + bits +} + +// AddMatch adds a match to the tokens. +// This function is very sensitive to inlining and right on the border. 
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { + if debugDeflate { + if xlength >= maxMatchLength+baseMatchLength { + panic(fmt.Errorf("invalid length: %v", xlength)) + } + if xoffset >= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oCode := offsetCode(xoffset) + xoffset |= oCode << 16 + + t.extraHist[lengthCodes1[uint8(xlength)]]++ + t.offHist[oCode&31]++ + t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + // We need to have at least baseMatchLength left over for next loop. + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + t.extraHist[lengthCodes1[uint8(xl)]]++ + t.offHist[oc&31]++ + t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } + +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if false { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off&255] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[(off>>7)&255] + 14 + } else { + return offsetCodes[(off>>14)&255] + 28 + } + } + if off < uint32(len(offsetCodes)) { + return offsetCodes[uint8(off)] + } + return offsetCodes14[uint8(off>>7)] +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914..dac97e58 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. 
switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff 
--git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 926f5f15..cc05d0f7 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error { // If the buffer is over-read an error is returned. func (s *Scratch) decompress() error { br := &s.bits - br.init(s.br.unread()) + if err := br.init(s.br.unread()); err != nil { + return err + } var s1, s2 decoder // Initialize and decode first state and symbol. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9..e36d9742 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. 
v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index ec71f7a3..aed2347c 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + // flush32 will flush out, so there are at least 32 bits available for writing. 
func (b *bitWriter) flush32() { if b.nBits < 32 { diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index d9223a91..4ee4fa18 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) } } else { for ; n >= 0; n -= 4 { @@ -484,34 +483,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +519,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = 
huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +543,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +558,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +604,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +618,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + 
largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +628,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +656,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +676,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +693,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != 
maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +708,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +724,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 42a237ea..3c0b398c 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { b, err := fse.Decompress(in[:iSize], s.fse) s.fse.Out = nil if err != nil { - return s, nil, err + return s, nil, fmt.Errorf("fse decompress returned: %w", err) } if len(b) > 255 { return s, nil, errors.New("corrupt input: output table too large") diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2..c4c7ab2d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ 
R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + 
MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // 
br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery 
+ 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX 
+ MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // 
br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // 
exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL 
(R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT 
·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 298c4f8e..05db94d3 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. 
+ for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6b9929dd..5f272d87 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "hash/crc32" "io" "os" "path/filepath" @@ -192,16 +193,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. 
if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -210,6 +209,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } @@ -441,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } } var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 12e8f6f0..fd4a36f7 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { return b.encodeLits(b.literals, rawAllLits) } // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) + saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { if org == nil { return errIncompressible @@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { } b.output = wr.out + // Maybe even add a bigger margin. if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible + return nil } // Size is output minus block header. 
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 176788f2..512ffe5b 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { - return 0, nil + return 0, io.ErrUnexpectedEOF } r := bb[0] *b = bb[1:] diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30459cd3..f04aaa21 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -40,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -341,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -463,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } if len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } + d.current.crc.Write(next.b) } if next.err == nil && next.d != nil && next.d.hasCRC { got := uint32(d.current.crc.Sum64()) @@ -495,18 +482,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -865,13 +846,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -953,3 +929,20 @@ 
decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e6..07a90dd7 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. 
+// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) } return nil } } +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + // WithDecoderMaxWindow allows to set a maximum window size for decodes. // This allows rejecting packets that will cause big memory usage. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b2725f77..ca095145 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -21,6 +21,9 @@ type dict struct { const dictMagic = "\x37\xa4\x30\xec" +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 + // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { if d == nil { @@ -29,14 +32,38 @@ func (d *dict) ID() uint32 { return d.id } -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { +// ContentSize returns the dictionary content size or 0 if d is nil. 
+func (d *dict) ContentSize() int { if d == nil { return 0 } return len(d.content) } +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -61,7 +88,7 @@ func loadDict(b []byte) (*dict, error) { var err error d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { - return nil, err + return nil, fmt.Errorf("loading literal table: %w", err) } d.litEnc.Reuse = huff0.ReusePolicyMust @@ -119,3 +146,16 @@ func loadDict(b []byte) (*dict, error) { return &d, nil } + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. 
+func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index bfb2e146..e008b992 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -149,7 +149,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { if singleBlock { e.lowMem = true } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) e.lowMem = low } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 830f5ba7..9819d414 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,10 +32,9 @@ type match struct { length int32 rep int32 est int32 - _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } -const highScore = 25000 +const highScore = maxMatchLen * 8 // estBits will estimate output bits from predefined tables. func (m *match) estBits(bitsPerByte int32) { @@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. nextEmit := s - cv := load6432(src, s) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) 
s.litLen = uint32(until - nextEmit) } - _ = addLiterals if debugEncoder { println("recent offsets:", blk.recentOffsets) @@ -189,53 +186,96 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b *match) *match { - if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { - return a - } - return b - } - const goodEnough = 100 + const goodEnough = 250 + + cv := load6432(src, s) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] - matchAt := func(offset int32, s int32, first uint32, rep int32) match { + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} + return } if debugAsserts { + if offset <= 0 { + panic(offset) + } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) } } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if rep < 0 { + // Extend candidate match backwards as far as possible. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } } - m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) - best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + if s == nextEmit { + // Check repeats straight after a match. 
+ improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } } } // Load next and check... @@ -250,47 +290,45 @@ encodeLoop: if s >= sLimit { break encodeLoop } - cv = load6432(src, s) continue } - s++ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) // Long at s+1, s+2 - m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) - m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) - best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) if false { // Short at s+3. // Too often worse... 
- m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) - best = bestOf(best, &m) + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - const skipBeginning = 2 - if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd := bestOf(best, &m) - if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd = bestOf(bestEnd, &m) + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. 
+ if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } } - best = bestEnd } } } @@ -303,51 +341,34 @@ encodeLoop: // We have a match, we can store the forward value if best.rep > 0 { - s = best.s var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ + if debugAsserts && s <= nextEmit { + panic("s <= nextEmit") } - addLiterals(&seq, start) + addLiterals(&seq, best.s) - // rep 0 - seq.offset = uint32(best.rep) + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) if debugSequences { println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - // Index match start+1 (long) -> s - 1 - index0 := s + // Index old s + 1 -> s - 1 + index0 := s + 1 s = best.s + best.length nextEmit = s if s >= sLimit { if debugEncoder { println("repeat ended", s, best.length) - } break encodeLoop } // Index skipped... 
off := index0 + e.cur - for index0 < s-1 { + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -357,17 +378,19 @@ encodeLoop: index0++ } switch best.rep { - case 2: + case 2, 4 | 1: offset1, offset2 = offset2, offset1 - case 3: + case 3, 4 | 2: offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 } - cv = load6432(src, s) continue } // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. + index0 := s + 1 s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 @@ -380,22 +403,9 @@ encodeLoop: panic("invalid offset") } - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - // Write our sequence var seq seq + l := best.length seq.litLen = uint32(s - nextEmit) seq.matchLen = uint32(l - zstdMinMatch) if seq.litLen > 0 { @@ -412,10 +422,8 @@ encodeLoop: break encodeLoop } - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { + // Index old s + 1 -> s - 1 + for index0 < s { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -424,50 +432,6 @@ encodeLoop: e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. 
- // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } } if int(nextEmit) < len(src) { diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 65c6c36d..4de0aed0 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error { s.eofWritten = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. 
- case nil: - default: - s.err = err - return err + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err } _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) @@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { return } _, s.writeErr = s.w.Write(blk.output) @@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. 
- err := errIncompressible oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } + // Output directly to dst + blk.output = dst - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = blk.output blk.output = oldout } else { enc.Reset(e.o.dict, false) @@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = append(dst, blk.output...) 
blk.reset(nil) } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 6015f498..50f70533 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + "math/bits" "runtime" "strings" ) @@ -38,7 +39,7 @@ func (o *encoderOptions) setDefault() { blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, - allLitEntropy: true, + allLitEntropy: false, lowMem: false, } } @@ -237,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption { } } if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest + o.allLitEntropy = l > SpeedDefault } return nil @@ -305,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -316,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. 
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 65984bf0..cc0aa227 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -297,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error { return nil } -// checkCRC will check the checksum if the frame has one. +// checkCRC will check the checksum, assuming the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. 
func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - // We can overwrite upper tmp now buf, err := d.rawInput.readSmall(4) if err != nil { @@ -311,10 +303,6 @@ func (d *frameDec) checkCRC() error { return err } - if d.o.ignoreChecksum { - return nil - } - want := binary.LittleEndian.Uint32(buf[:4]) got := uint32(d.crc.Sum64()) @@ -330,17 +318,13 @@ func (d *frameDec) checkCRC() error { return nil } -// consumeCRC reads the checksum data if the frame has one. +// consumeCRC skips over the checksum, assuming the frame has one. func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) } - - return nil + return err } // runDecoder will run the decoder for the remainder of the frame. @@ -419,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if d.o.ignoreChecksum { err = d.consumeCRC() } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() } } } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index f833d154..9405fcf1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { maxBlockSize = s.windowSize } + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } for i := seqs - 1; i >= 0; i-- { if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) + printf("reading sequence %d, exceeded available data. 
Overread by %d\n", seqs-i, -br.remain()) return io.ErrUnexpectedEOF } var ll, mo, ml int @@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } size := ll + ml + len(out) if size-startSize > maxBlockSize { - if size-startSize == 424242 { - panic("here") - } return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } if size > cap(out) { @@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } } - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 191384ad..8adabd82 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -5,6 +5,7 @@ package zstd import ( "fmt" + "io" "github.com/klauspost/compress/internal/cpuinfo" ) @@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ctx.ll, ctx.litRemain+ctx.ll) + case errorOverread: + return true, io.ErrUnexpectedEOF + case errorNotEnoughSpace: size := ctx.outPosition + ctx.ll + ctx.ml if debugDecoder { @@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { s.seqSize += ctx.litRemain if s.seqSize > maxBlockSize { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } err := br.close() if err != nil { @@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4 // error reported when capacity of `out` is too small const errorNotEnoughSpace = 5 +// error reported when bits are overread. 
+const errorOverread = 6 + // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. // // Please refer to seqdec_generic.go for the reference implementation. @@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { litRemain: len(s.literals), } + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + s.seqSize = 0 lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 var errCode int @@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { case errorNotEnoughLiterals: ll := ctx.seqs[i].ll return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF } return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) @@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { if s.seqSize > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. 
code:", errCode) + } err := br.close() if err != nil { printf("Closing sequences: %v, %+v\n", err, *br) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index b94993a0..b6f4ba6f 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop: sequenceDecs_decode_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_end + JLE sequenceDecs_decode_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_end SHLQ $0x08, DX @@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_byte_by_byte +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_end: // Update offset MOVQ R9, AX @@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero: sequenceDecs_decode_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_end + JLE sequenceDecs_decode_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_2_end SHLQ $0x08, DX @@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -320,6 +328,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop: sequenceDecs_decode_56_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE 
sequenceDecs_decode_56_amd64_fill_end + JLE sequenceDecs_decode_56_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_56_amd64_fill_end SHLQ $0x08, DX @@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_56_amd64_fill_end: // Update offset MOVQ R9, AX @@ -613,6 +630,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop: sequenceDecs_decode_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_end + JLE sequenceDecs_decode_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_end SHLQ $0x08, AX @@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_byte_by_byte +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end: sequenceDecs_decode_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_end + JLE sequenceDecs_decode_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_2_end SHLQ $0x08, AX @@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -889,6 +919,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) 
RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop: sequenceDecs_decode_56_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_end + JLE sequenceDecs_decode_56_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_56_bmi2_fill_end SHLQ $0x08, AX @@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_56_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -1140,6 +1179,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop: sequenceDecs_decodeSync_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_end + JLE sequenceDecs_decodeSync_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_end SHLQ $0x08, DX @@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_end: // Update offset MOVQ R9, AX @@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero: sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_end + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread CMPQ BX, 
$0x07 JLE sequenceDecs_decodeSync_amd64_fill_2_end SHLQ $0x08, DX @@ -1882,6 +1930,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -2291,6 +2343,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop: sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_end + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_end SHLQ $0x08, AX @@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end: sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_2_end SHLQ $0x08, AX @@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -2801,6 +2866,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ 
$0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop: sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_end SHLQ $0x08, DX @@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_end: // Update offset MOVQ R9, AX @@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero: sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end SHLQ $0x08, DX @@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -3455,6 +3533,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop: sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_end 
SHLQ $0x08, AX @@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end: sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end SHLQ $0x08, AX @@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -4067,6 +4158,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index b1886f7c..89396673 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -72,7 +72,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. 
@@ -129,11 +128,11 @@ func matchLen(a, b []byte) (n int) { } func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) } type byter interface { diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 857a93e5..37b5167d 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -19,6 +19,12 @@ Package home: https://github.com/klauspost/cpuid `go get -u github.com/klauspost/cpuid/v2` using modules. Drop `v2` for others. +Installing binary: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +Or download binaries from release page: https://github.com/klauspost/cpuid/releases + ### Homebrew For macOS/Linux users, you can install via [brew](https://brew.sh/) @@ -302,6 +308,7 @@ Exit Code 1 | AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | | AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | | AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | | BMI1 | Bit Manipulation Instruction Set 1 | | BMI2 | Bit Manipulation Instruction Set 2 | | CETIBT | Intel CET Indirect Branch Tracking | @@ -355,6 +362,7 @@ Exit Code 1 | IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | | IBS_PREVENTHOST | Disallowing IBS use by the host supported | | IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| IDPRED_CTRL | IPRED_DIS | | INT_WBINVD | WBINVD/WBNOINVD are interruptible. 
| | INVLPGB | NVLPGB and TLBSYNC instruction supported | | LAHF | LAHF/SAHF in long mode | @@ -372,8 +380,9 @@ Exit Code 1 | MOVDIRI | Move Doubleword as Direct Store | | MOVSB_ZL | Fast Zero-Length MOVSB | | MPX | Intel MPX (Memory Protection Extensions) | -| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | +| MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | | MSRIRC | Instruction Retired Counter MSR available | +| MSRLIST | Read/Write List of Model Specific Registers | | MSR_PAGEFLUSH | Page Flush MSR available | | NRIPS | Indicates support for NRIP save on VMEXIT | | NX | NX (No-Execute) bit | @@ -381,12 +390,13 @@ Exit Code 1 | PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | | POPCNT | POPCNT instruction | | PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | -| PREFETCHI | PREFETCHIT0/1 instructions | -| PSFD | AMD: Predictive Store Forward Disable | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | Predictive Store Forward Disable | | RDPRU | RDPRU instruction supported | | RDRAND | RDRAND instruction is available | | RDSEED | RDSEED instruction is available | | RDTSCP | RDTSCP Instruction | +| RRSBA_CTRL | Restricted RSB Alternate | | RTM | Restricted Transactional Memory | | RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. 
| | SERIALIZE | Serialize Instruction Execution | @@ -439,6 +449,7 @@ Exit Code 1 | VTE | AMD Virtual Transparent Encryption supported | | WAITPKG | TPAUSE, UMONITOR, UMWAIT | | WBNOINVD | Write Back and Do Not Invalidate Cache | +| WRMSRNS | Non-Serializing Write to Model Specific Register | | X87 | FPU | | XGETBV1 | Supports XGETBV with ECX = 1 | | XOP | Bulldozer XOP functions | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index cf2ae9c5..89a861d4 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -99,6 +99,7 @@ const ( AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX (VEX encoded) VNNI neural network instructions AVXVNNIINT8 // AVX-VNNI-INT8 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 CETIBT // Intel CET Indirect Branch Tracking @@ -152,6 +153,7 @@ const ( IBS_OPFUSE // AMD: Indicates support for IbsOpFuse IBS_PREVENTHOST // Disallowing IBS use by the host supported IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS INT_WBINVD // WBINVD/WBNOINVD are interruptible. INVLPGB // NVLPGB and TLBSYNC instruction supported LAHF // LAHF/SAHF in long mode @@ -171,6 +173,7 @@ const ( MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. 
MOVUPD is more efficient than MOVLPD/MOVHPD MPX // Intel MPX (Memory Protection Extensions) MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers MSR_PAGEFLUSH // Page Flush MSR available NRIPS // Indicates support for NRIP save on VMEXIT NX // NX (No-Execute) bit @@ -179,11 +182,12 @@ const ( POPCNT // POPCNT instruction PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled PREFETCHI // PREFETCHIT0/1 instructions - PSFD // AMD: Predictive Store Forward Disable + PSFD // Predictive Store Forward Disable RDPRU // RDPRU instruction supported RDRAND // RDRAND instruction is available RDSEED // RDSEED instruction is available RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate RTM // Restricted Transactional Memory RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. SERIALIZE // Serialize Instruction Execution @@ -236,6 +240,7 @@ const ( VTE // AMD Virtual Transparent Encryption supported WAITPKG // TPAUSE, UMONITOR, UMWAIT WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register X87 // FPU XGETBV1 // Supports XGETBV with ECX = 1 XOP // Bulldozer XOP functions @@ -1232,13 +1237,20 @@ func support() flagSet { fs.setIf(edx&(1<<25) != 0, AMXINT8) // eax1 = CPUID.(EAX=7, ECX=1).EAX fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<19) != 0, WRMSRNS) fs.setIf(eax1&(1<<21) != 0, AMXFP16) + fs.setIf(eax1&(1<<27) != 0, MSRLIST) } } // CPUID.(EAX=7, ECX=2) _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<0) != 0, PSFD) + fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL) + fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL) + fs.setIf(edx&(1<<4) != 0, BHI_CTRL) fs.setIf(edx&(1<<5) != 0, MCDT_NO) + } // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go 
b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 8b6cd2b7..2a27f44d 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -39,181 +39,186 @@ func _() { _ = x[AVXSLOW-29] _ = x[AVXVNNI-30] _ = x[AVXVNNIINT8-31] - _ = x[BMI1-32] - _ = x[BMI2-33] - _ = x[CETIBT-34] - _ = x[CETSS-35] - _ = x[CLDEMOTE-36] - _ = x[CLMUL-37] - _ = x[CLZERO-38] - _ = x[CMOV-39] - _ = x[CMPCCXADD-40] - _ = x[CMPSB_SCADBS_SHORT-41] - _ = x[CMPXCHG8-42] - _ = x[CPBOOST-43] - _ = x[CPPC-44] - _ = x[CX16-45] - _ = x[EFER_LMSLE_UNS-46] - _ = x[ENQCMD-47] - _ = x[ERMS-48] - _ = x[F16C-49] - _ = x[FLUSH_L1D-50] - _ = x[FMA3-51] - _ = x[FMA4-52] - _ = x[FP128-53] - _ = x[FP256-54] - _ = x[FSRM-55] - _ = x[FXSR-56] - _ = x[FXSROPT-57] - _ = x[GFNI-58] - _ = x[HLE-59] - _ = x[HRESET-60] - _ = x[HTT-61] - _ = x[HWA-62] - _ = x[HYBRID_CPU-63] - _ = x[HYPERVISOR-64] - _ = x[IA32_ARCH_CAP-65] - _ = x[IA32_CORE_CAP-66] - _ = x[IBPB-67] - _ = x[IBRS-68] - _ = x[IBRS_PREFERRED-69] - _ = x[IBRS_PROVIDES_SMP-70] - _ = x[IBS-71] - _ = x[IBSBRNTRGT-72] - _ = x[IBSFETCHSAM-73] - _ = x[IBSFFV-74] - _ = x[IBSOPCNT-75] - _ = x[IBSOPCNTEXT-76] - _ = x[IBSOPSAM-77] - _ = x[IBSRDWROPCNT-78] - _ = x[IBSRIPINVALIDCHK-79] - _ = x[IBS_FETCH_CTLX-80] - _ = x[IBS_OPDATA4-81] - _ = x[IBS_OPFUSE-82] - _ = x[IBS_PREVENTHOST-83] - _ = x[IBS_ZEN4-84] - _ = x[INT_WBINVD-85] - _ = x[INVLPGB-86] - _ = x[LAHF-87] - _ = x[LAM-88] - _ = x[LBRVIRT-89] - _ = x[LZCNT-90] - _ = x[MCAOVERFLOW-91] - _ = x[MCDT_NO-92] - _ = x[MCOMMIT-93] - _ = x[MD_CLEAR-94] - _ = x[MMX-95] - _ = x[MMXEXT-96] - _ = x[MOVBE-97] - _ = x[MOVDIR64B-98] - _ = x[MOVDIRI-99] - _ = x[MOVSB_ZL-100] - _ = x[MOVU-101] - _ = x[MPX-102] - _ = x[MSRIRC-103] - _ = x[MSR_PAGEFLUSH-104] - _ = x[NRIPS-105] - _ = x[NX-106] - _ = x[OSXSAVE-107] - _ = x[PCONFIG-108] - _ = x[POPCNT-109] - _ = x[PPIN-110] - _ = x[PREFETCHI-111] - _ = x[PSFD-112] - _ = x[RDPRU-113] - _ = x[RDRAND-114] 
- _ = x[RDSEED-115] - _ = x[RDTSCP-116] - _ = x[RTM-117] - _ = x[RTM_ALWAYS_ABORT-118] - _ = x[SERIALIZE-119] - _ = x[SEV-120] - _ = x[SEV_64BIT-121] - _ = x[SEV_ALTERNATIVE-122] - _ = x[SEV_DEBUGSWAP-123] - _ = x[SEV_ES-124] - _ = x[SEV_RESTRICTED-125] - _ = x[SEV_SNP-126] - _ = x[SGX-127] - _ = x[SGXLC-128] - _ = x[SHA-129] - _ = x[SME-130] - _ = x[SME_COHERENT-131] - _ = x[SPEC_CTRL_SSBD-132] - _ = x[SRBDS_CTRL-133] - _ = x[SSE-134] - _ = x[SSE2-135] - _ = x[SSE3-136] - _ = x[SSE4-137] - _ = x[SSE42-138] - _ = x[SSE4A-139] - _ = x[SSSE3-140] - _ = x[STIBP-141] - _ = x[STIBP_ALWAYSON-142] - _ = x[STOSB_SHORT-143] - _ = x[SUCCOR-144] - _ = x[SVM-145] - _ = x[SVMDA-146] - _ = x[SVMFBASID-147] - _ = x[SVML-148] - _ = x[SVMNP-149] - _ = x[SVMPF-150] - _ = x[SVMPFT-151] - _ = x[SYSCALL-152] - _ = x[SYSEE-153] - _ = x[TBM-154] - _ = x[TLB_FLUSH_NESTED-155] - _ = x[TME-156] - _ = x[TOPEXT-157] - _ = x[TSCRATEMSR-158] - _ = x[TSXLDTRK-159] - _ = x[VAES-160] - _ = x[VMCBCLEAN-161] - _ = x[VMPL-162] - _ = x[VMSA_REGPROT-163] - _ = x[VMX-164] - _ = x[VPCLMULQDQ-165] - _ = x[VTE-166] - _ = x[WAITPKG-167] - _ = x[WBNOINVD-168] - _ = x[X87-169] - _ = x[XGETBV1-170] - _ = x[XOP-171] - _ = x[XSAVE-172] - _ = x[XSAVEC-173] - _ = x[XSAVEOPT-174] - _ = x[XSAVES-175] - _ = x[AESARM-176] - _ = x[ARMCPUID-177] - _ = x[ASIMD-178] - _ = x[ASIMDDP-179] - _ = x[ASIMDHP-180] - _ = x[ASIMDRDM-181] - _ = x[ATOMICS-182] - _ = x[CRC32-183] - _ = x[DCPOP-184] - _ = x[EVTSTRM-185] - _ = x[FCMA-186] - _ = x[FP-187] - _ = x[FPHP-188] - _ = x[GPA-189] - _ = x[JSCVT-190] - _ = x[LRCPC-191] - _ = x[PMULL-192] - _ = x[SHA1-193] - _ = x[SHA2-194] - _ = x[SHA3-195] - _ = x[SHA512-196] - _ = x[SM3-197] - _ = x[SM4-198] - _ = x[SVE-199] - _ = x[lastID-200] + _ = x[BHI_CTRL-32] + _ = x[BMI1-33] + _ = x[BMI2-34] + _ = x[CETIBT-35] + _ = x[CETSS-36] + _ = x[CLDEMOTE-37] + _ = x[CLMUL-38] + _ = x[CLZERO-39] + _ = x[CMOV-40] + _ = x[CMPCCXADD-41] + _ = x[CMPSB_SCADBS_SHORT-42] + _ = x[CMPXCHG8-43] + _ = 
x[CPBOOST-44] + _ = x[CPPC-45] + _ = x[CX16-46] + _ = x[EFER_LMSLE_UNS-47] + _ = x[ENQCMD-48] + _ = x[ERMS-49] + _ = x[F16C-50] + _ = x[FLUSH_L1D-51] + _ = x[FMA3-52] + _ = x[FMA4-53] + _ = x[FP128-54] + _ = x[FP256-55] + _ = x[FSRM-56] + _ = x[FXSR-57] + _ = x[FXSROPT-58] + _ = x[GFNI-59] + _ = x[HLE-60] + _ = x[HRESET-61] + _ = x[HTT-62] + _ = x[HWA-63] + _ = x[HYBRID_CPU-64] + _ = x[HYPERVISOR-65] + _ = x[IA32_ARCH_CAP-66] + _ = x[IA32_CORE_CAP-67] + _ = x[IBPB-68] + _ = x[IBRS-69] + _ = x[IBRS_PREFERRED-70] + _ = x[IBRS_PROVIDES_SMP-71] + _ = x[IBS-72] + _ = x[IBSBRNTRGT-73] + _ = x[IBSFETCHSAM-74] + _ = x[IBSFFV-75] + _ = x[IBSOPCNT-76] + _ = x[IBSOPCNTEXT-77] + _ = x[IBSOPSAM-78] + _ = x[IBSRDWROPCNT-79] + _ = x[IBSRIPINVALIDCHK-80] + _ = x[IBS_FETCH_CTLX-81] + _ = x[IBS_OPDATA4-82] + _ = x[IBS_OPFUSE-83] + _ = x[IBS_PREVENTHOST-84] + _ = x[IBS_ZEN4-85] + _ = x[IDPRED_CTRL-86] + _ = x[INT_WBINVD-87] + _ = x[INVLPGB-88] + _ = x[LAHF-89] + _ = x[LAM-90] + _ = x[LBRVIRT-91] + _ = x[LZCNT-92] + _ = x[MCAOVERFLOW-93] + _ = x[MCDT_NO-94] + _ = x[MCOMMIT-95] + _ = x[MD_CLEAR-96] + _ = x[MMX-97] + _ = x[MMXEXT-98] + _ = x[MOVBE-99] + _ = x[MOVDIR64B-100] + _ = x[MOVDIRI-101] + _ = x[MOVSB_ZL-102] + _ = x[MOVU-103] + _ = x[MPX-104] + _ = x[MSRIRC-105] + _ = x[MSRLIST-106] + _ = x[MSR_PAGEFLUSH-107] + _ = x[NRIPS-108] + _ = x[NX-109] + _ = x[OSXSAVE-110] + _ = x[PCONFIG-111] + _ = x[POPCNT-112] + _ = x[PPIN-113] + _ = x[PREFETCHI-114] + _ = x[PSFD-115] + _ = x[RDPRU-116] + _ = x[RDRAND-117] + _ = x[RDSEED-118] + _ = x[RDTSCP-119] + _ = x[RRSBA_CTRL-120] + _ = x[RTM-121] + _ = x[RTM_ALWAYS_ABORT-122] + _ = x[SERIALIZE-123] + _ = x[SEV-124] + _ = x[SEV_64BIT-125] + _ = x[SEV_ALTERNATIVE-126] + _ = x[SEV_DEBUGSWAP-127] + _ = x[SEV_ES-128] + _ = x[SEV_RESTRICTED-129] + _ = x[SEV_SNP-130] + _ = x[SGX-131] + _ = x[SGXLC-132] + _ = x[SHA-133] + _ = x[SME-134] + _ = x[SME_COHERENT-135] + _ = x[SPEC_CTRL_SSBD-136] + _ = x[SRBDS_CTRL-137] + _ = x[SSE-138] + _ = x[SSE2-139] + _ = 
x[SSE3-140] + _ = x[SSE4-141] + _ = x[SSE42-142] + _ = x[SSE4A-143] + _ = x[SSSE3-144] + _ = x[STIBP-145] + _ = x[STIBP_ALWAYSON-146] + _ = x[STOSB_SHORT-147] + _ = x[SUCCOR-148] + _ = x[SVM-149] + _ = x[SVMDA-150] + _ = x[SVMFBASID-151] + _ = x[SVML-152] + _ = x[SVMNP-153] + _ = x[SVMPF-154] + _ = x[SVMPFT-155] + _ = x[SYSCALL-156] + _ = x[SYSEE-157] + _ = x[TBM-158] + _ = x[TLB_FLUSH_NESTED-159] + _ = x[TME-160] + _ = x[TOPEXT-161] + _ = x[TSCRATEMSR-162] + _ = x[TSXLDTRK-163] + _ = x[VAES-164] + _ = x[VMCBCLEAN-165] + _ = x[VMPL-166] + _ = x[VMSA_REGPROT-167] + _ = x[VMX-168] + _ = x[VPCLMULQDQ-169] + _ = x[VTE-170] + _ = x[WAITPKG-171] + _ = x[WBNOINVD-172] + _ = x[WRMSRNS-173] + _ = x[X87-174] + _ = x[XGETBV1-175] + _ = x[XOP-176] + _ = x[XSAVE-177] + _ = x[XSAVEC-178] + _ = x[XSAVEOPT-179] + _ = x[XSAVES-180] + _ = x[AESARM-181] + _ = x[ARMCPUID-182] + _ = x[ASIMD-183] + _ = x[ASIMDDP-184] + _ = x[ASIMDHP-185] + _ = x[ASIMDRDM-186] + _ = x[ATOMICS-187] + _ = x[CRC32-188] + _ = x[DCPOP-189] + _ = x[EVTSTRM-190] + _ = x[FCMA-191] + _ = x[FP-192] + _ = x[FPHP-193] + _ = x[GPA-194] + _ = x[JSCVT-195] + _ = x[LRCPC-196] + _ = x[PMULL-197] + _ = x[SHA1-198] + _ = x[SHA2-199] + _ = x[SHA3-200] + _ = x[SHA512-201] + _ = x[SM3-202] + _ = x[SM4-203] + _ = x[SVE-204] + _ = x[lastID-205] _ = x[firstID-0] } -const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4INT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 278, 282, 288, 293, 301, 306, 312, 316, 325, 343, 351, 358, 362, 366, 380, 386, 390, 394, 403, 407, 411, 416, 421, 425, 429, 436, 440, 443, 449, 452, 455, 465, 475, 488, 501, 505, 509, 523, 540, 543, 553, 564, 570, 578, 589, 597, 609, 625, 639, 650, 660, 675, 683, 693, 700, 704, 707, 714, 719, 730, 737, 744, 752, 755, 761, 766, 775, 782, 790, 794, 797, 
803, 816, 821, 823, 830, 837, 843, 847, 856, 860, 865, 871, 877, 883, 886, 902, 911, 914, 923, 938, 951, 957, 971, 978, 981, 986, 989, 992, 1004, 1018, 1028, 1031, 1035, 1039, 1043, 1048, 1053, 1058, 1063, 1077, 1088, 1094, 1097, 1102, 1111, 1115, 1120, 1125, 1131, 1138, 1143, 1146, 1162, 1165, 1171, 1181, 1189, 1193, 1202, 1206, 1218, 1221, 1231, 1234, 1241, 1249, 1252, 1259, 1262, 1267, 1273, 1281, 1287, 1293, 1301, 1306, 1313, 1320, 1328, 1335, 1340, 1345, 1352, 1356, 1358, 1362, 1365, 1370, 1375, 1380, 1384, 1388, 1392, 1398, 1401, 1404, 1407, 1413} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 282, 286, 290, 296, 301, 309, 314, 320, 324, 333, 351, 359, 366, 370, 374, 388, 394, 398, 402, 411, 415, 419, 424, 429, 433, 437, 444, 448, 451, 457, 460, 463, 473, 483, 496, 509, 513, 517, 531, 548, 551, 561, 572, 578, 586, 597, 605, 617, 633, 647, 658, 668, 683, 691, 702, 712, 719, 723, 726, 733, 738, 749, 756, 763, 771, 774, 780, 785, 794, 801, 809, 813, 816, 822, 829, 842, 847, 849, 856, 863, 869, 873, 882, 886, 891, 897, 903, 909, 919, 922, 938, 947, 950, 959, 974, 987, 993, 1007, 1014, 1017, 1022, 1025, 1028, 1040, 1054, 1064, 1067, 1071, 1075, 1079, 1084, 1089, 1094, 1099, 1113, 1124, 1130, 1133, 1138, 1147, 1151, 1156, 1161, 1167, 1174, 1179, 1182, 1198, 1201, 1207, 1217, 1225, 1229, 1238, 1242, 1254, 1257, 1267, 1270, 1277, 1285, 1292, 1295, 1302, 1305, 1310, 1316, 1324, 1330, 1336, 1344, 1349, 1356, 1363, 1371, 1378, 1383, 1388, 1395, 1399, 1401, 1405, 1408, 1413, 1418, 1423, 1427, 1431, 1435, 1441, 1444, 1447, 1450, 1456} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/koron/go-ssdp/Makefile b/vendor/github.com/koron/go-ssdp/Makefile index 7303aa1b..a077c5a2 100644 --- a/vendor/github.com/koron/go-ssdp/Makefile +++ 
b/vendor/github.com/koron/go-ssdp/Makefile @@ -6,11 +6,11 @@ build: .PHONY: test test: - go test ./... + go test -gcflags '-e' ./... -.PHONY: test-race -test-race: - go test -race . +.PHONY: bench +bench: + go test -bench ./... .PHONY: tags tags: @@ -19,20 +19,16 @@ tags: .PHONY: cover cover: mkdir -p tmp - go test -coverprofile tmp/_cover.out . + go test -coverprofile tmp/_cover.out . ./internal/... go tool cover -html tmp/_cover.out -o tmp/cover.html .PHONY: checkall -checkall: vet lint staticcheck +checkall: vet staticcheck .PHONY: vet vet: go vet ./... -.PHONY: lint -lint: - golint ./... - .PHONY: staticcheck staticcheck: staticcheck ./... @@ -45,6 +41,10 @@ clean: examples-clean # based on: github.com/koron-go/_skeleton/Makefile +.PHONY: test-race +test-race: + go test -race . + .PHONY: examples examples: examples-build diff --git a/vendor/github.com/koron/go-ssdp/advertise.go b/vendor/github.com/koron/go-ssdp/advertise.go index 81d94b73..e64fcde5 100644 --- a/vendor/github.com/koron/go-ssdp/advertise.go +++ b/vendor/github.com/koron/go-ssdp/advertise.go @@ -8,42 +8,50 @@ import ( "net" "net/http" "sync" + + "github.com/koron/go-ssdp/internal/multicast" + "github.com/koron/go-ssdp/internal/ssdplog" ) type message struct { to net.Addr - data []byte + data multicast.DataProvider } // Advertiser is a server to advertise a service. type Advertiser struct { - st string - usn string - location string - server string - maxAge int + st string + usn string + locProv LocationProvider + server string + maxAge int - conn *multicastConn + conn *multicast.Conn ch chan *message wg sync.WaitGroup wgS sync.WaitGroup } // Advertise starts advertisement of service. -func Advertise(st, usn, location, server string, maxAge int) (*Advertiser, error) { - conn, err := multicastListen(recvAddrResolver) +// location should be a string or a ssdp.LocationProvider. 
+func Advertise(st, usn string, location interface{}, server string, maxAge int) (*Advertiser, error) { + locProv, err := toLocationProvider(location) + if err != nil { + return nil, err + } + conn, err := multicast.Listen(multicast.RecvAddrResolver) if err != nil { return nil, err } - logf("SSDP advertise on: %s", conn.LocalAddr().String()) + ssdplog.Printf("SSDP advertise on: %s", conn.LocalAddr().String()) a := &Advertiser{ - st: st, - usn: usn, - location: location, - server: server, - maxAge: maxAge, - conn: conn, - ch: make(chan *message), + st: st, + usn: usn, + locProv: locProv, + server: server, + maxAge: maxAge, + conn: conn, + ch: make(chan *message), } a.wg.Add(2) a.wgS.Add(1) @@ -60,9 +68,10 @@ func Advertise(st, usn, location, server string, maxAge int) (*Advertiser, error } func (a *Advertiser) recvMain() error { - err := a.conn.readPackets(0, func(addr net.Addr, data []byte) error { + // TODO: update listening interfaces of a.conn + err := a.conn.ReadPackets(0, func(addr net.Addr, data []byte) error { if err := a.handleRaw(addr, data); err != nil { - logf("failed to handle message: %s", err) + ssdplog.Printf("failed to handle message: %s", err) } return nil }) @@ -72,16 +81,13 @@ func (a *Advertiser) recvMain() error { return nil } -func (a *Advertiser) sendMain() error { +func (a *Advertiser) sendMain() { for msg := range a.ch { _, err := a.conn.WriteTo(msg.data, msg.to) if err != nil { - if nerr, ok := err.(net.Error); !ok || !nerr.Temporary() { - logf("failed to send: %s", err) - } + ssdplog.Printf("failed to send: %s", err) } } - return nil } func (a *Advertiser) handleRaw(from net.Addr, raw []byte) error { @@ -104,19 +110,16 @@ func (a *Advertiser) handleRaw(from net.Addr, raw []byte) error { // skip when ST is not matched/expected. return nil } - logf("received M-SEARCH MAN=%s ST=%s from %s", man, st, from.String()) + ssdplog.Printf("received M-SEARCH MAN=%s ST=%s from %s", man, st, from.String()) // build and send a response. 
- msg, err := buildOK(a.st, a.usn, a.location, a.server, a.maxAge) - if err != nil { - return err - } - a.ch <- &message{to: from, data: msg} + msg := buildOK(a.st, a.usn, a.locProv.Location(from, nil), a.server, a.maxAge) + a.ch <- &message{to: from, data: multicast.BytesDataProvider(msg)} return nil } -func buildOK(st, usn, location, server string, maxAge int) ([]byte, error) { +func buildOK(st, usn, location, server string, maxAge int) []byte { + // bytes.Buffer#Write() is never fail, so we can omit error checks. b := new(bytes.Buffer) - // FIXME: error should be checked. b.WriteString("HTTP/1.1 200 OK\r\n") fmt.Fprintf(b, "EXT: \r\n") fmt.Fprintf(b, "ST: %s\r\n", st) @@ -129,7 +132,7 @@ func buildOK(st, usn, location, server string, maxAge int) ([]byte, error) { } fmt.Fprintf(b, "CACHE-CONTROL: max-age=%d\r\n", maxAge) b.WriteString("\r\n") - return b.Bytes(), nil + return b.Bytes() } // Close stops advertisement. @@ -149,23 +152,26 @@ func (a *Advertiser) Close() error { // Alive announces ssdp:alive message. func (a *Advertiser) Alive() error { - addr, err := multicastSendAddr() + addr, err := multicast.SendAddr() if err != nil { return err } - msg, err := buildAlive(addr, a.st, a.usn, a.location, a.server, - a.maxAge) - if err != nil { - return err + msg := &aliveDataProvider{ + host: addr, + nt: a.st, + usn: a.usn, + location: a.locProv, + server: a.server, + maxAge: a.maxAge, } a.ch <- &message{to: addr, data: msg} - logf("sent alive") + ssdplog.Printf("sent alive") return nil } // Bye announces ssdp:byebye message. 
func (a *Advertiser) Bye() error { - addr, err := multicastSendAddr() + addr, err := multicast.SendAddr() if err != nil { return err } @@ -173,7 +179,7 @@ func (a *Advertiser) Bye() error { if err != nil { return err } - a.ch <- &message{to: addr, data: msg} - logf("sent bye") + a.ch <- &message{to: addr, data: multicast.BytesDataProvider(msg)} + ssdplog.Printf("sent bye") return nil } diff --git a/vendor/github.com/koron/go-ssdp/announce.go b/vendor/github.com/koron/go-ssdp/announce.go index b1c2008c..9874d01f 100644 --- a/vendor/github.com/koron/go-ssdp/announce.go +++ b/vendor/github.com/koron/go-ssdp/announce.go @@ -4,24 +4,35 @@ import ( "bytes" "fmt" "net" + + "github.com/koron/go-ssdp/internal/multicast" ) // AnnounceAlive sends ssdp:alive message. -func AnnounceAlive(nt, usn, location, server string, maxAge int, localAddr string) error { +// location should be a string or a ssdp.LocationProvider. +func AnnounceAlive(nt, usn string, location interface{}, server string, maxAge int, localAddr string) error { + locProv, err := toLocationProvider(location) + if err != nil { + return err + } // dial multicast UDP packet. - conn, err := multicastListen(&udpAddrResolver{addr: localAddr}) + conn, err := multicast.Listen(&multicast.AddrResolver{Addr: localAddr}) if err != nil { return err } defer conn.Close() // build and send message. 
- addr, err := multicastSendAddr() + addr, err := multicast.SendAddr() if err != nil { return err } - msg, err := buildAlive(addr, nt, usn, location, server, maxAge) - if err != nil { - return err + msg := &aliveDataProvider{ + host: addr, + nt: nt, + usn: usn, + location: locProv, + server: server, + maxAge: maxAge, } if _, err := conn.WriteTo(msg, addr); err != nil { return err @@ -29,9 +40,24 @@ func AnnounceAlive(nt, usn, location, server string, maxAge int, localAddr strin return nil } -func buildAlive(raddr net.Addr, nt, usn, location, server string, maxAge int) ([]byte, error) { +type aliveDataProvider struct { + host net.Addr + nt string + usn string + location LocationProvider + server string + maxAge int +} + +func (p *aliveDataProvider) Bytes(ifi *net.Interface) []byte { + return buildAlive(p.host, p.nt, p.usn, p.location.Location(nil, ifi), p.server, p.maxAge) +} + +var _ multicast.DataProvider = (*aliveDataProvider)(nil) + +func buildAlive(raddr net.Addr, nt, usn, location, server string, maxAge int) []byte { + // bytes.Buffer#Write() is never fail, so we can omit error checks. b := new(bytes.Buffer) - // FIXME: error should be checked. b.WriteString("NOTIFY * HTTP/1.1\r\n") fmt.Fprintf(b, "HOST: %s\r\n", raddr.String()) fmt.Fprintf(b, "NT: %s\r\n", nt) @@ -45,19 +71,19 @@ func buildAlive(raddr net.Addr, nt, usn, location, server string, maxAge int) ([ } fmt.Fprintf(b, "CACHE-CONTROL: max-age=%d\r\n", maxAge) b.WriteString("\r\n") - return b.Bytes(), nil + return b.Bytes() } // AnnounceBye sends ssdp:byebye message. func AnnounceBye(nt, usn, localAddr string) error { // dial multicast UDP packet. - conn, err := multicastListen(&udpAddrResolver{addr: localAddr}) + conn, err := multicast.Listen(&multicast.AddrResolver{Addr: localAddr}) if err != nil { return err } defer conn.Close() // build and send message. 
- addr, err := multicastSendAddr() + addr, err := multicast.SendAddr() if err != nil { return err } @@ -65,7 +91,7 @@ func AnnounceBye(nt, usn, localAddr string) error { if err != nil { return err } - if _, err := conn.WriteTo(msg, addr); err != nil { + if _, err := conn.WriteTo(multicast.BytesDataProvider(msg), addr); err != nil { return err } return nil diff --git a/vendor/github.com/koron/go-ssdp/doc.go b/vendor/github.com/koron/go-ssdp/doc.go index a1534868..d4dfd4b5 100644 --- a/vendor/github.com/koron/go-ssdp/doc.go +++ b/vendor/github.com/koron/go-ssdp/doc.go @@ -1,4 +1,4 @@ /* -Package ssdp provides ... +Package ssdp provides SSDP advertiser or so. */ package ssdp diff --git a/vendor/github.com/koron/go-ssdp/internal/multicast/doc.go b/vendor/github.com/koron/go-ssdp/internal/multicast/doc.go new file mode 100644 index 00000000..96d419f3 --- /dev/null +++ b/vendor/github.com/koron/go-ssdp/internal/multicast/doc.go @@ -0,0 +1,4 @@ +/* +Package multicast provides utilities for network multicast. +*/ +package multicast diff --git a/vendor/github.com/koron/go-ssdp/interface.go b/vendor/github.com/koron/go-ssdp/internal/multicast/interface.go similarity index 60% rename from vendor/github.com/koron/go-ssdp/interface.go rename to vendor/github.com/koron/go-ssdp/internal/multicast/interface.go index 6907e378..88fd5760 100644 --- a/vendor/github.com/koron/go-ssdp/interface.go +++ b/vendor/github.com/koron/go-ssdp/internal/multicast/interface.go @@ -1,33 +1,23 @@ -package ssdp +package multicast import ( "net" - "sync" ) -// Interfaces specify target interfaces to multicast. If no interfaces are -// specified, all interfaces will be used. -var Interfaces []net.Interface +type InterfacesProviderFunc func() []net.Interface -var ifLock sync.Mutex -var ifList []net.Interface +// InterfacesProvider specify a function to list all interfaces to multicast. +// If no provider are given, all possible interfaces will be used. 
+var InterfacesProvider InterfacesProviderFunc // interfaces gets list of net.Interface to multicast UDP packet. func interfaces() ([]net.Interface, error) { - ifLock.Lock() - defer ifLock.Unlock() - if len(Interfaces) > 0 { - return Interfaces, nil - } - if len(ifList) > 0 { - return ifList, nil - } - l, err := interfacesIPv4() - if err != nil { - return nil, err + if p := InterfacesProvider; p != nil { + if list := p(); len(list) > 0 { + return list, nil + } } - ifList = l - return ifList, nil + return interfacesIPv4() } // interfacesIPv4 lists net.Interface on IPv4. @@ -38,7 +28,7 @@ func interfacesIPv4() ([]net.Interface, error) { } list := make([]net.Interface, 0, len(iflist)) for _, ifi := range iflist { - if !hasLinkUp(&ifi) || !hasIPv4Address(&ifi) { + if !hasLinkUp(&ifi) || !hasMulticast(&ifi) || !hasIPv4Address(&ifi) { continue } list = append(list, ifi) @@ -51,6 +41,11 @@ func hasLinkUp(ifi *net.Interface) bool { return ifi.Flags&net.FlagUp != 0 } +// hasMulticast checks an I/F supports multicast or not. +func hasMulticast(ifi *net.Interface) bool { + return ifi.Flags&net.FlagMulticast != 0 +} + // hasIPv4Address checks an I/F have IPv4 address. func hasIPv4Address(ifi *net.Interface) bool { addrs, err := ifi.Addrs() diff --git a/vendor/github.com/koron/go-ssdp/multicast.go b/vendor/github.com/koron/go-ssdp/internal/multicast/multicast.go similarity index 61% rename from vendor/github.com/koron/go-ssdp/multicast.go rename to vendor/github.com/koron/go-ssdp/internal/multicast/multicast.go index 44202498..9a97353c 100644 --- a/vendor/github.com/koron/go-ssdp/multicast.go +++ b/vendor/github.com/koron/go-ssdp/internal/multicast/multicast.go @@ -1,4 +1,4 @@ -package ssdp +package multicast import ( "errors" @@ -7,17 +7,20 @@ import ( "strings" "time" + "github.com/koron/go-ssdp/internal/ssdplog" "golang.org/x/net/ipv4" ) -type multicastConn struct { +// Conn is multicast connection. 
+type Conn struct { laddr *net.UDPAddr conn *net.UDPConn pconn *ipv4.PacketConn iflist []net.Interface } -func multicastListen(r *udpAddrResolver) (*multicastConn, error) { +// Listen starts to receiving multicast messages. +func Listen(r *AddrResolver) (*Conn, error) { // prepare parameters. laddr, err := r.resolve() if err != nil { @@ -34,7 +37,7 @@ func multicastListen(r *udpAddrResolver) (*multicastConn, error) { conn.Close() return nil, err } - return &multicastConn{ + return &Conn{ laddr: laddr, conn: conn, pconn: pconn, @@ -47,7 +50,7 @@ func newIPv4MulticastConn(conn *net.UDPConn) (*ipv4.PacketConn, []net.Interface, if err != nil { return nil, nil, err } - addr, err := multicastSendAddr() + addr, err := SendAddr() if err != nil { return nil, nil, err } @@ -66,11 +69,11 @@ func joinGroupIPv4(conn *net.UDPConn, iflist []net.Interface, gaddr net.Addr) (* joined := 0 for _, ifi := range iflist { if err := wrap.JoinGroup(&ifi, gaddr); err != nil { - logf("failed to join group %s on %s: %s", gaddr.String(), ifi.Name, err) + ssdplog.Printf("failed to join group %s on %s: %s", gaddr.String(), ifi.Name, err) continue } joined++ - logf("joined group %s on %s", gaddr.String(), ifi.Name) + ssdplog.Printf("joined group %s on %s (#%d)", gaddr.String(), ifi.Name, ifi.Index) } if joined == 0 { return nil, errors.New("no interfaces had joined to group") @@ -78,7 +81,8 @@ func joinGroupIPv4(conn *net.UDPConn, iflist []net.Interface, gaddr net.Addr) (* return wrap, nil } -func (mc *multicastConn) Close() error { +// Close closes a multicast connection. +func (mc *Conn) Close() error { if err := mc.pconn.Close(); err != nil { return err } @@ -86,26 +90,49 @@ func (mc *multicastConn) Close() error { return nil } -func (mc *multicastConn) WriteTo(data []byte, to net.Addr) (int, error) { +// DataProvider provides a body of multicast message to send. 
+type DataProvider interface { + Bytes(*net.Interface) []byte +} + +//type multicastDataProviderFunc func(*net.Interface) []byte +// +//func (f multicastDataProviderFunc) Bytes(ifi *net.Interface) []byte { +// return f(ifi) +//} + +type BytesDataProvider []byte + +func (b BytesDataProvider) Bytes(ifi *net.Interface) []byte { + return []byte(b) +} + +// WriteTo sends a multicast message to interfaces. +func (mc *Conn) WriteTo(dataProv DataProvider, to net.Addr) (int, error) { if uaddr, ok := to.(*net.UDPAddr); ok && !uaddr.IP.IsMulticast() { - return mc.conn.WriteTo(data, to) + return mc.conn.WriteTo(dataProv.Bytes(nil), to) } + sum := 0 for _, ifi := range mc.iflist { if err := mc.pconn.SetMulticastInterface(&ifi); err != nil { return 0, err } - if _, err := mc.pconn.WriteTo(data, nil, to); err != nil { + n, err := mc.pconn.WriteTo(dataProv.Bytes(&ifi), nil, to) + if err != nil { return 0, err } + sum += n } - return len(data), nil + return sum, nil } -func (mc *multicastConn) LocalAddr() net.Addr { +// LocalAddr returns local address to listen multicast packets. +func (mc *Conn) LocalAddr() net.Addr { return mc.laddr } -func (mc *multicastConn) readPackets(timeout time.Duration, h packetHandler) error { +// ReadPackets reads multicast packets. 
+func (mc *Conn) ReadPackets(timeout time.Duration, h PacketHandler) error { buf := make([]byte, 65535) if timeout > 0 { mc.pconn.SetReadDeadline(time.Now().Add(timeout)) diff --git a/vendor/github.com/koron/go-ssdp/internal/multicast/udp.go b/vendor/github.com/koron/go-ssdp/internal/multicast/udp.go new file mode 100644 index 00000000..2d9b7d79 --- /dev/null +++ b/vendor/github.com/koron/go-ssdp/internal/multicast/udp.go @@ -0,0 +1,65 @@ +package multicast + +import ( + "net" + "sync" +) + +type PacketHandler func(net.Addr, []byte) error + +type AddrResolver struct { + Addr string + + mu sync.RWMutex + udp *net.UDPAddr + err error +} + +func (r *AddrResolver) setAddress(addr string) { + r.mu.Lock() + r.Addr = addr + r.udp = nil + r.err = nil + r.mu.Unlock() +} + +func (r *AddrResolver) resolve() (*net.UDPAddr, error) { + r.mu.RLock() + if err := r.err; err != nil { + r.mu.RUnlock() + return nil, err + } + if udp := r.udp; udp != nil { + r.mu.RUnlock() + return udp, nil + } + r.mu.RUnlock() + + r.mu.Lock() + defer r.mu.Unlock() + r.udp, r.err = net.ResolveUDPAddr("udp4", r.Addr) + return r.udp, r.err +} + +var RecvAddrResolver = &AddrResolver{Addr: "224.0.0.1:1900"} + +// SetRecvAddrIPv4 updates multicast address where to receive packets. +// This never fail now. +func SetRecvAddrIPv4(addr string) error { + RecvAddrResolver.setAddress(addr) + return nil +} + +var sendAddrResolver = &AddrResolver{Addr: "239.255.255.250:1900"} + +// SendAddr returns an address to send multicast UDP package. +func SendAddr() (*net.UDPAddr, error) { + return sendAddrResolver.resolve() +} + +// SetSendAddrIPv4 updates a UDP address to send multicast packets. +// This never fail now. 
+func SetSendAddrIPv4(addr string) error { + sendAddrResolver.setAddress(addr) + return nil +} diff --git a/vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go b/vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go new file mode 100644 index 00000000..2439a0fa --- /dev/null +++ b/vendor/github.com/koron/go-ssdp/internal/ssdplog/ssdplog.go @@ -0,0 +1,16 @@ +/* +Package ssdplog provides log mechanism for ssdp. +*/ +package ssdplog + +import "log" + +var LoggerProvider = func() *log.Logger { return nil } + +func Printf(s string, a ...interface{}) { + if p := LoggerProvider; p != nil { + if l := p(); l != nil { + l.Printf(s, a...) + } + } +} diff --git a/vendor/github.com/koron/go-ssdp/location.go b/vendor/github.com/koron/go-ssdp/location.go new file mode 100644 index 00000000..a7970ce2 --- /dev/null +++ b/vendor/github.com/koron/go-ssdp/location.go @@ -0,0 +1,40 @@ +package ssdp + +import ( + "fmt" + "net" +) + +// LocationProvider provides address for Location header which can be reached from +// "from" address network. +type LocationProvider interface { + // Location provides an address be reachable from the network located + // by "from" address or "ifi" interface. + // One of "from" or "ifi" must not be nil. + Location(from net.Addr, ifi *net.Interface) string +} + +// LocationProviderFunc type is an adapter to allow the use of ordinary +// functions are location providers. 
+type LocationProviderFunc func(net.Addr, *net.Interface) string + +func (f LocationProviderFunc) Location(from net.Addr, ifi *net.Interface) string { + return f(from, ifi) +} + +type fixedLocation string + +func (s fixedLocation) Location(net.Addr, *net.Interface) string { + return string(s) +} + +func toLocationProvider(v interface{}) (LocationProvider, error) { + switch w := v.(type) { + case string: + return fixedLocation(w), nil + case LocationProvider: + return w, nil + default: + return nil, fmt.Errorf("location should be a string or a ssdp.LocationProvider but got %T", w) + } +} diff --git a/vendor/github.com/koron/go-ssdp/log.go b/vendor/github.com/koron/go-ssdp/log.go deleted file mode 100644 index 56cd5bc8..00000000 --- a/vendor/github.com/koron/go-ssdp/log.go +++ /dev/null @@ -1,12 +0,0 @@ -package ssdp - -import "log" - -// Logger is default logger for SSDP module. -var Logger *log.Logger - -func logf(s string, a ...interface{}) { - if l := Logger; l != nil { - l.Printf(s, a...) - } -} diff --git a/vendor/github.com/koron/go-ssdp/monitor.go b/vendor/github.com/koron/go-ssdp/monitor.go index 9d79038c..e564d5cb 100644 --- a/vendor/github.com/koron/go-ssdp/monitor.go +++ b/vendor/github.com/koron/go-ssdp/monitor.go @@ -9,6 +9,9 @@ import ( "net" "net/http" "sync" + + "github.com/koron/go-ssdp/internal/multicast" + "github.com/koron/go-ssdp/internal/ssdplog" ) // Monitor monitors SSDP's alive and byebye messages. @@ -17,17 +20,17 @@ type Monitor struct { Bye ByeHandler Search SearchHandler - conn *multicastConn + conn *multicast.Conn wg sync.WaitGroup } // Start starts to monitor SSDP messages. 
func (m *Monitor) Start() error { - conn, err := multicastListen(recvAddrResolver) + conn, err := multicast.Listen(multicast.RecvAddrResolver) if err != nil { return err } - logf("monitoring on %s", conn.LocalAddr().String()) + ssdplog.Printf("monitoring on %s", conn.LocalAddr().String()) m.conn = conn m.wg.Add(1) go func() { @@ -38,7 +41,8 @@ func (m *Monitor) Start() error { } func (m *Monitor) serve() error { - err := m.conn.readPackets(0, func(addr net.Addr, data []byte) error { + // TODO: update listening interfaces of m.conn + err := m.conn.ReadPackets(0, func(addr net.Addr, data []byte) error { msg := make([]byte, len(data)) copy(msg, data) go m.handleRaw(addr, msg) @@ -62,7 +66,7 @@ func (m *Monitor) handleRaw(addr net.Addr, raw []byte) error { return m.handleNotify(addr, raw) } n := bytes.Index(raw, []byte("\r\n")) - logf("unexpected method: %q", string(raw[:n])) + ssdplog.Printf("unexpected method: %q", string(raw[:n])) return nil } diff --git a/vendor/github.com/koron/go-ssdp/search.go b/vendor/github.com/koron/go-ssdp/search.go index 7ae646b7..e4e5ddcc 100644 --- a/vendor/github.com/koron/go-ssdp/search.go +++ b/vendor/github.com/koron/go-ssdp/search.go @@ -10,6 +10,9 @@ import ( "regexp" "strconv" "time" + + "github.com/koron/go-ssdp/internal/multicast" + "github.com/koron/go-ssdp/internal/ssdplog" ) // Service is discovered service. @@ -68,15 +71,15 @@ const ( // Search searches services by SSDP. func Search(searchType string, waitSec int, localAddr string) ([]Service, error) { // dial multicast UDP packet. - conn, err := multicastListen(&udpAddrResolver{addr: localAddr}) + conn, err := multicast.Listen(&multicast.AddrResolver{Addr: localAddr}) if err != nil { return nil, err } defer conn.Close() - logf("search on %s", conn.LocalAddr().String()) + ssdplog.Printf("search on %s", conn.LocalAddr().String()) // send request. 
- addr, err := multicastSendAddr() + addr, err := multicast.SendAddr() if err != nil { return nil, err } @@ -84,7 +87,7 @@ func Search(searchType string, waitSec int, localAddr string) ([]Service, error) if err != nil { return nil, err } - if _, err := conn.WriteTo(msg, addr); err != nil { + if _, err := conn.WriteTo(multicast.BytesDataProvider(msg), addr); err != nil { return nil, err } @@ -93,15 +96,15 @@ func Search(searchType string, waitSec int, localAddr string) ([]Service, error) h := func(a net.Addr, d []byte) error { srv, err := parseService(a, d) if err != nil { - logf("invalid search response from %s: %s", a.String(), err) + ssdplog.Printf("invalid search response from %s: %s", a.String(), err) return nil } list = append(list, *srv) - logf("search response from %s: %s", a.String(), srv.USN) + ssdplog.Printf("search response from %s: %s", a.String(), srv.USN) return nil } d := time.Second * time.Duration(waitSec) - if err := conn.readPackets(d, h); err != nil { + if err := conn.ReadPackets(d, h); err != nil { return nil, err } diff --git a/vendor/github.com/koron/go-ssdp/ssdp.go b/vendor/github.com/koron/go-ssdp/ssdp.go new file mode 100644 index 00000000..5b875c02 --- /dev/null +++ b/vendor/github.com/koron/go-ssdp/ssdp.go @@ -0,0 +1,37 @@ +package ssdp + +import ( + "log" + "net" + + "github.com/koron/go-ssdp/internal/multicast" + "github.com/koron/go-ssdp/internal/ssdplog" +) + +func init() { + multicast.InterfacesProvider = func() []net.Interface { + return Interfaces + } + ssdplog.LoggerProvider = func() *log.Logger { + return Logger + } +} + +// Interfaces specify target interfaces to multicast. If no interfaces are +// specified, all interfaces will be used. +var Interfaces []net.Interface + +// Logger is default logger for SSDP module. +var Logger *log.Logger + +// SetMulticastRecvAddrIPv4 updates multicast address where to receive packets. +// This never fail now. 
+func SetMulticastRecvAddrIPv4(addr string) error { + return multicast.SetRecvAddrIPv4(addr) +} + +// SetMulticastSendAddrIPv4 updates a UDP address to send multicast packets. +// This never fail now. +func SetMulticastSendAddrIPv4(addr string) error { + return multicast.SetSendAddrIPv4(addr) +} diff --git a/vendor/github.com/koron/go-ssdp/udp.go b/vendor/github.com/koron/go-ssdp/udp.go deleted file mode 100644 index 3a2d2583..00000000 --- a/vendor/github.com/koron/go-ssdp/udp.go +++ /dev/null @@ -1,65 +0,0 @@ -package ssdp - -import ( - "net" - "sync" -) - -type packetHandler func(net.Addr, []byte) error - -type udpAddrResolver struct { - addr string - - mu sync.RWMutex - udp *net.UDPAddr - err error -} - -func (r *udpAddrResolver) setAddress(addr string) { - r.mu.Lock() - r.addr = addr - r.udp = nil - r.err = nil - r.mu.Unlock() -} - -func (r *udpAddrResolver) resolve() (*net.UDPAddr, error) { - r.mu.RLock() - if err := r.err; err != nil { - r.mu.RUnlock() - return nil, err - } - if udp := r.udp; udp != nil { - r.mu.RUnlock() - return udp, nil - } - r.mu.RUnlock() - - r.mu.Lock() - defer r.mu.Unlock() - r.udp, r.err = net.ResolveUDPAddr("udp4", r.addr) - return r.udp, r.err -} - -var recvAddrResolver = &udpAddrResolver{addr: "224.0.0.0:1900"} - -// SetMulticastRecvAddrIPv4 updates multicast address where to receive packets. -// This never fail now. -func SetMulticastRecvAddrIPv4(addr string) error { - recvAddrResolver.setAddress(addr) - return nil -} - -var sendAddrResolver = &udpAddrResolver{addr: "239.255.255.250:1900"} - -// multicastSendAddr returns an address to send multicast UDP package. -func multicastSendAddr() (*net.UDPAddr, error) { - return sendAddrResolver.resolve() -} - -// SetMulticastSendAddrIPv4 updates a UDP address to send multicast packets. -// This never fail now. 
-func SetMulticastSendAddrIPv4(addr string) error { - sendAddrResolver.setAddress(addr) - return nil -} diff --git a/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go b/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go index fe67238c..490b4a29 100644 --- a/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go +++ b/vendor/github.com/libp2p/go-libp2p-asn-util/asn.go @@ -4,14 +4,15 @@ import ( "errors" "fmt" "net" + "sync" "github.com/libp2p/go-cidranger" ) -var Store *indirectAsnStore +var Store *lazyAsnStore func init() { - Store = newIndirectAsnStore() + Store = &lazyAsnStore{} } type networkWithAsn struct { @@ -66,32 +67,29 @@ func newAsnStore() (*asnStore, error) { return &asnStore{cr}, nil } -type indirectAsnStore struct { - store *asnStore - doneLoading chan struct{} +// lazyAsnStore builds the underlying trie on first call to AsnForIPv6. +// Alternatively, Init can be called to manually trigger initialization. +type lazyAsnStore struct { + store *asnStore + once sync.Once } // AsnForIPv6 returns the AS number for the given IPv6 address. // If no mapping exists for the given IP, this function will // return an empty ASN and a nil error. 
-func (a *indirectAsnStore) AsnForIPv6(ip net.IP) (string, error) { - <-a.doneLoading +func (a *lazyAsnStore) AsnForIPv6(ip net.IP) (string, error) { + a.once.Do(a.init) return a.store.AsnForIPv6(ip) } -func newIndirectAsnStore() *indirectAsnStore { - a := &indirectAsnStore{ - doneLoading: make(chan struct{}), - } - - go func() { - defer close(a.doneLoading) - store, err := newAsnStore() - if err != nil { - panic(err) - } - a.store = store - }() +func (a *lazyAsnStore) Init() { + a.once.Do(a.init) +} - return a +func (a *lazyAsnStore) init() { + store, err := newAsnStore() + if err != nil { + panic(err) + } + a.store = store } diff --git a/vendor/github.com/libp2p/go-libp2p-asn-util/version.json b/vendor/github.com/libp2p/go-libp2p-asn-util/version.json index 1437d5b7..a654d65a 100644 --- a/vendor/github.com/libp2p/go-libp2p-asn-util/version.json +++ b/vendor/github.com/libp2p/go-libp2p-asn-util/version.json @@ -1,3 +1,3 @@ { - "version": "v0.2.0" + "version": "v0.3.0" } diff --git a/vendor/github.com/libp2p/go-libp2p/.gitignore b/vendor/github.com/libp2p/go-libp2p/.gitignore index a505ae07..64c6d853 100644 --- a/vendor/github.com/libp2p/go-libp2p/.gitignore +++ b/vendor/github.com/libp2p/go-libp2p/.gitignore @@ -1,2 +1,6 @@ *.swp .idea +*.qlog +*.sqlog +*.qlog.zst +*.sqlog.zst diff --git a/vendor/github.com/libp2p/go-libp2p/CHANGELOG.md b/vendor/github.com/libp2p/go-libp2p/CHANGELOG.md new file mode 100644 index 00000000..8bec3802 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/CHANGELOG.md @@ -0,0 +1,225 @@ +# Table Of Contents +- [v0.27.0](#v0270) +- [v0.26.4](#v0264) +- [v0.26.3](#v0263) +- [v0.26.2](#v0262) +- [v0.26.1](#v0261) +- [v0.26.0](#v0260) +- [v0.25.1](#v0251) +- [v0.25.0](#v0250) + +# [v0.27.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.27.0) + +### Breaking Changes + +* The `LocalPrivateKey` method was removed from the `network.Conn` interface. 
[#2144](https://github.com/libp2p/go-libp2p/pull/2144) + +## 🔦 Highlights + +### Additional metrics +Since the last release, we've added metrics for: +* [Relay Service](https://github.com/libp2p/go-libp2p/pull/2154): RequestStatus, RequestCounts, RejectionReasons for Reservation and Connection Requests, +ConnectionDuration, BytesTransferred, Relay Service Status. +* [Autorelay](https://github.com/libp2p/go-libp2p/pull/2185): relay finder status, reservation request outcomes, current reservations, candidate circuit v2 support, current candidates, relay addresses updated, num relay address, and scheduled work times + +## 🐞 Bugfixes + +* autonat: don't change status on dial request refused [2225](https://github.com/libp2p/go-libp2p/pull/2225) +* relaysvc: fix flaky TestReachabilityChangeEvent [2215](https://github.com/libp2p/go-libp2p/pull/2215) +* basichost: prevent duplicate dials [2196](https://github.com/libp2p/go-libp2p/pull/2196) +* websocket: don't set a WSS multiaddr for accepted unencrypted conns [2199](https://github.com/libp2p/go-libp2p/pull/2199) +* identify: Fix IdentifyWait when Connected events happen out of order [2173](https://github.com/libp2p/go-libp2p/pull/2173) +* circuitv2: cleanup relay service properly [2164](https://github.com/libp2p/go-libp2p/pull/2164) + +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.4...v0.27.0 + +# [v0.26.4](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.4) + +This patch release fixes a busy-looping happening inside AutoRelay on private nodes, see [2208](https://github.com/libp2p/go-libp2p/pull/2208). 
+ +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.4 + +# [v0.26.3](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.3) + +* rcmgr: fix JSON marshalling of ResourceManagerStat peer map [2156](https://github.com/libp2p/go-libp2p/pull/2156) +* websocket: Don't limit message sizes in the websocket reader [2193](https://github.com/libp2p/go-libp2p/pull/2193) + +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.3 + +# [v0.26.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.2) + +This patch release fixes two bugs: +* A panic in WebTransport: https://github.com/quic-go/webtransport-go/releases/tag/v0.5.2 +* Incorrect accounting of accepted connections in the swarm metrics: [#2147](https://github.com/libp2p/go-libp2p/pull/2147) + +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.26.0...v0.26.2 + +# v0.26.1 + +This version was retracted due to errors when publishing the release. + +# [v0.26.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.26.0) + +## 🔦 Highlights + +### Circuit Relay Changes + +#### [Removed Circuit Relay v1](https://github.com/libp2p/go-libp2p/pull/2107) + +We've decided to remove support for Circuit Relay v1 in this release. v1 Relays have been retired a few months ago. Notably, running the Relay v1 protocol was expensive and resulted in only a small number of nodes in the network. Users had to either manually configure these nodes as static relays, or discover them from the DHT. +Furthermore, rust-libp2p [has dropped support](https://github.com/libp2p/rust-libp2p/pull/2549) and js-libp2p [is dropping support](https://github.com/libp2p/js-libp2p/pull/1533) for Relay v1. + +Support for Relay v2 was first added in [late 2021 in v0.16.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.16.0). With Circuit Relay v2 it became cheap to run (limited) relays. Public nodes also started the relay service by default. 
There's now a massive number of Relay v2 nodes on the IPFS network, and they don't advertise their service to the DHT any more. Because there's now so many of these nodes, connecting to just a small number of nodes (e.g. by joining the DHT), a node is statistically guaranteed to connect to some relays.
+
+#### [Unlimited Relay v2](https://github.com/libp2p/go-libp2p/pull/2125)
+
+In conjunction with removing relay v1, we also added an option to Circuit Relay v2 to disable limits.
+This is done by enabling `WithInfiniteLimits`. When enabled this allows for users to have a drop-in replacement for Relay v1 with Relay v2.
+
+### Additional metrics
+
+Since the last release, we've added additional metrics to different components.
+Metrics were added to:
+* [AutoNat](https://github.com/libp2p/go-libp2p/pull/2086): Current Reachability Status and Confidence, Client and Server DialResponses, Server DialRejections. The dashboard is [available here](https://github.com/libp2p/go-libp2p/blob/master/dashboards/autonat/autonat.json).
+* Swarm:
+  - [Early Muxer Selection](https://github.com/libp2p/go-libp2p/pull/2119): Added early_muxer label indicating whether a connection was established using early muxer selection.
+  - [IP Version](https://github.com/libp2p/go-libp2p/pull/2114): Added ip_version label to connection metrics
+* Identify:
+  - Metrics for Identify, IdentifyPush, PushesTriggered (https://github.com/libp2p/go-libp2p/pull/2069)
+  - Address Count, Protocol Count, Connection IDPush Support (https://github.com/libp2p/go-libp2p/pull/2126)
+
+
+We also migrated the metric dashboards to a top-level [dashboards](https://github.com/libp2p/go-libp2p/tree/master/dashboards) directory.
+
+## 🐞 Bugfixes
+
+### AutoNat
+* [Fixed a bug](https://github.com/libp2p/go-libp2p/issues/2091) where AutoNat would emit events when the observed address has changed even though the node reachability hadn't changed.
+ +### Relay Manager +* [Fixed a bug](https://github.com/libp2p/go-libp2p/pull/2093) where the Relay Manager started a new relay even though the previous reachability was `Public` or if a relay already existed. + +### [Stop sending detailed error messages on closing QUIC connections](https://github.com/libp2p/go-libp2p/pull/2112) + +Users reported seeing confusing error messages and could not determine the root cause or if the error was from a local or remote peer: + +```{12D... Application error 0x0: conn-27571160: system: cannot reserve inbound connection: resource limit exceeded}``` + +This error occurred when a connection had been made with a remote peer but the remote peer dropped the connection (due to it exceeding limits). +This was actually an `Application error` emitted by `quic-go` and it was a bug in go-libp2p that we sent the whole message. +For now, we decided to stop sending this confusing error message. In the future, we will report such errors via [error codes](https://github.com/libp2p/specs/issues/479). + +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.1...v0.26.0 + +# [v0.25.1](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.1) + +Fix some test-utils used by https://github.com/libp2p/go-libp2p-kad-dht + +* mocknet: Start host in mocknet by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2078 +* chore: update go-multistream by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2081 + +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.25.0...v0.25.1 + +# [v0.25.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.25.0) + +## 🔦 Highlights + +### Metrics + +We've started instrumenting the entire stack. 
In this release, we're adding metrics for: +* the swarm: tracking incoming and outgoing connections, transports, security protocols and stream multiplexers in use: (https://github.com/libp2p/go-libp2p/blob/master/p2p/net/swarm/grafana-dashboards/swarm.json) +* the event bus: tracking how different events are propagated through the stack and to external consumers (https://github.com/libp2p/go-libp2p/blob/master/p2p/host/eventbus/grafana-dashboards/eventbus.json) + +Our metrics effort is still ongoing, see https://github.com/libp2p/go-libp2p/issues/1356 for progress. We'll add metrics and dashboards for more libp2p components in a future release. + +### Switching to Google's official Protobuf compiler + +So far, we were using GoGo Protobuf to compile our Protobuf definitions to Go code. However, this library was deprecated in October last year: https://twitter.com/awalterschulze/status/1584553056100057088. We [benchmarked](https://github.com/libp2p/go-libp2p/issues/1976#issuecomment-1371527732) serialization and deserialization, and found that it's (only) 20% slower than GoGo. Since the vast majority of go-libp2p's CPU time is spent in code paths other than Protobuf handling, switching to the official compiler seemed like a worthwhile tradeoff. + +### Removal of OpenSSL + +Before this release, go-libp2p had an option to use OpenSSL bindings for certain cryptographic primitives, mostly to speed up the generation of signatures and their verification. When building go-libp2p using `go build`, we'd use the standard library crypto packages. OpenSSL was only used when passing in a build tag: `go build -tags openssl`. 
+Maintaining our own fork of the long unmaintained [go-openssl package](https://github.com/libp2p/go-openssl) has proven to place a larger than expected maintenance burden on the libp2p stewards, and when we recently discovered a range of new bugs ([this](https://github.com/libp2p/go-openssl/issues/38) and [this](https://github.com/libp2p/go-libp2p/issues/1892) and [this](https://github.com/libp2p/go-libp2p/issues/1951)), we decided to re-evaluate if this code path is really worth it. The results surprised us, it turns out that: +* The Go standard library is faster than OpenSSL for all key types that are not RSA. +* Verifying RSA signatures is as fast as Ed25519 signatures using the Go standard library, and even faster in OpenSSL. +* Generating RSA signatures is painfully slow, both using Go standard library crypto and using OpenSSL (but even slower using Go standard library). + +Now the good news is, that if your node is not using an RSA key, it will never create any RSA signatures (it might need to verify them though, when it connects to a node that uses RSA keys). If you're concerned about CPU performance, it's a good idea to avoid RSA keys (the same applies to bandwidth, RSA keys are huge!). Even for nodes using RSA keys, it turns out that generating the signatures is not a significant part of their CPU load, as verified by profiling one of Kubo's bootstrap nodes. + +We therefore concluded that it's safe to drop this code path altogether, and thereby reduce our maintenance burden. + +### New Resource Manager types + +* Introduces a new type `LimitVal` which can explicitly specify "use default", "unlimited", "block all", as well as any positive number. The zero value of `LimitVal` (the value when you create the object in Go) is "Use default". + * The JSON marshalling of this is straightforward. +* Introduces a new `ResourceLimits` type which uses `LimitVal` instead of ints so it can encode the above for the resources. 
+* Changes `LimitConfig` to `PartialLimitConfig` and uses `ResourceLimits`. This along with the marshalling changes means you can now marshal the fact that some resource limit is set to block all. + * Because the default is to use the defaults, this avoids the footgun of initializing the resource manager with 0 limits (that would block everything). + +In general, you can go from a resource config with defaults to a concrete one with `.Build()`. e.g. `ResourceLimits.Build() => BaseLimit`, `PartialLimitConfig.Build() => ConcreteLimitConfig`, `LimitVal.Build() => int`. See PR #2000 for more details. + +If you're using the defaults for the resource manager, there should be no changes needed. + +### Other Breaking Changes + +We've cleaned up our API to consistently use `protocol.ID` for libp2p and application protocols. Specifically, this means that the peer store now uses `protocol.ID`s, and the host's `SetStreamHandler` as well. + +## What's Changed +* chore: use generic LRU cache by @muXxer in https://github.com/libp2p/go-libp2p/pull/1980 +* core/crypto: drop all OpenSSL code paths by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1953 +* add WebTransport to the list of default transports by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1915 +* identify: remove old code targeting Go 1.17 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1964 +* core: remove introspection package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1978 +* identify: remove support for Identify Delta by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1975 +* roadmap: remove optimizations of the TCP-based handshake by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1959 +* circuitv2: correctly set the transport in the ConnectionState by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1972 +* switch to Google's Protobuf library, make protobufs compile with go generate by @marten-seemann in 
https://github.com/libp2p/go-libp2p/pull/1979 +* ci: run go generate as part of the go-check workflow by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1986 +* ci: use GitHub token to install protoc by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1996 +* feat: add some users to the readme by @p-shahi in https://github.com/libp2p/go-libp2p/pull/1981 +* CI: Fast multidimensional Interop tests by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1991 +* Fix: Ignore zero values when marshalling Limits. by @ajnavarro in https://github.com/libp2p/go-libp2p/pull/1998 +* feat: add ci flakiness score to readme by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2002 +* peerstore: make it possible to use an empty peer ID by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2006 +* feat: rcmgr: Export resource manager errors by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2008 +* feat: ci test-plans: Parse test timeout parameter for interop test by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2014 +* Clean addresses with peer id before adding to addrbook by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2007 +* Expose muxer ids by @aschmahmann in https://github.com/libp2p/go-libp2p/pull/2012 +* swarm: add a basic metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1973 +* consistently use protocol.ID instead of strings by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2004 +* swarm metrics: fix datasource for dashboard by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2024 +* chore: remove textual roadmap in favor for Starmap by @p-shahi in https://github.com/libp2p/go-libp2p/pull/2036 +* rcmgr: *: Always close connscope by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2037 +* chore: remove license files from the eventbus package by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2042 +* Migrate to test-plan composite action by 
@thomaseizinger in https://github.com/libp2p/go-libp2p/pull/2039 +* use quic-go and webtransport-go from quic-go organization by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2040 +* holepunch: fix flaky test by not removing holepunch protocol handler by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1948 +* quic / webtransport: extend test to test dialing a draft-29 and a v1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1957 +* p2p/test: add test for EvtLocalAddressesUpdated event by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2016 +* quic, tcp: only register Prometheus counters when metrics are enabled by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1971 +* p2p/test: fix flaky notification test by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2051 +* quic: disable sending of Version Negotiation packets by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2015 +* eventbus: add metrics by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2038 +* metrics: use a single slice pool for all metrics tracer by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2054 +* webtransport: tidy up some test output by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2053 +* set names for eventbus event subscriptions by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2057 +* autorelay: Split libp2p.EnableAutoRelay into 2 functions by @sukunrt in https://github.com/libp2p/go-libp2p/pull/2022 +* rcmgr: Use prometheus SDK for rcmgr metrics by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2044 +* websocket: Replace gorilla websocket transport with nhooyr websocket transport by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/1982 +* rcmgr: add libp2p prefix to all metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2063 +* chore: git-ignore various flavors of qlog files by @marten-seemann in 
https://github.com/libp2p/go-libp2p/pull/2064 +* interop: Update interop test to match spec by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2049 +* chore: update webtransport-go to v0.5.1 by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2072 +* identify: refactor sending of Identify pushes by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/1984 +* feat!: rcmgr: Change LimitConfig to use LimitVal type by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2000 +* p2p/test/quic: use contexts with a timeout for Connect calls by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2070 +* identify: add some basic metrics by @marten-seemann in https://github.com/libp2p/go-libp2p/pull/2069 +* chore: Release v0.25.0 by @MarcoPolo in https://github.com/libp2p/go-libp2p/pull/2077 + +## New Contributors +* @muXxer made their first contribution in https://github.com/libp2p/go-libp2p/pull/1980 +* @ajnavarro made their first contribution in https://github.com/libp2p/go-libp2p/pull/1998 +* @sukunrt made their first contribution in https://github.com/libp2p/go-libp2p/pull/2007 +* @thomaseizinger made their first contribution in https://github.com/libp2p/go-libp2p/pull/2039 + +**Full Changelog**: https://github.com/libp2p/go-libp2p/compare/v0.24.2...v0.25.0 diff --git a/vendor/github.com/libp2p/go-libp2p/README.md b/vendor/github.com/libp2p/go-libp2p/README.md index 7e2c48ad..533a36f7 100644 --- a/vendor/github.com/libp2p/go-libp2p/README.md +++ b/vendor/github.com/libp2p/go-libp2p/README.md @@ -10,6 +10,7 @@ Go Reference +

# Table of Contents @@ -55,11 +56,6 @@ import "github.com/libp2p/go-libp2p" Examples can be found in the [examples folder](examples). -## Development - -### Tests - -`go test ./...` will run all tests in the repo. # Contribute @@ -82,3 +78,25 @@ There's a few things you can do right now to help out: We test against and support the two most recent major releases of Go. This is informed by Go's own [security policy](https://go.dev/security). + +# Notable Users +Some notable users of go-libp2p are: +- [Kubo](https://github.com/ipfs/kubo) - The original Go implementation of IPFS +- [Lotus](https://github.com/filecoin-project/lotus) - An implementation of the Filecoin protocol +- [Drand](https://github.com/drand/drand) - A distributed random beacon daemon +- [Prysm](https://github.com/prysmaticlabs/prysm) - An Ethereum Beacon Chain consensus client built by [Prysmatic Labs](https://prysmaticlabs.com/) +- [Berty](https://github.com/berty/berty) - An open, secure, offline-first, peer-to-peer and zero trust messaging app. 
+- [Wasp](https://github.com/iotaledger/wasp) - A node that runs IOTA Smart Contracts built by the [IOTA Foundation](https://www.iota.org/)
+- [Mina](https://github.com/minaprotocol/mina) - A lightweight, constant-sized blockchain that runs zero-knowledge smart contracts
+- [Polygon Edge](https://github.com/0xPolygon/polygon-edge) - A modular, extensible framework for building Ethereum compatible networks
+- [Celestia Node](https://github.com/celestiaorg/celestia-node) - The Go implementation of Celestia's data availability nodes
+- [Status go](https://github.com/status-im/status-go) - Status bindings for go-ethereum, built by [Status.im](https://status.im/)
+- [Flow](https://github.com/onflow/flow-go) - A blockchain built to support games, apps, and digital assets built by [Dapper Labs](https://www.dapperlabs.com/)
+- [Swarm Bee](https://github.com/ethersphere/bee) - A client for connecting to the [Swarm network](https://www.ethswarm.org/)
+- [Elrond Go](https://github.com/multiversx/mx-chain-go) - The Go implementation of the Elrond network protocol
+- [Sonr](https://github.com/sonr-io/sonr) - A platform to integrate DID Documents, WebAuthn, and IPFS and manage digital identity and assets.
+- [EdgeVPN](https://github.com/mudler/edgevpn) - A decentralized, immutable, portable VPN and reverse proxy over p2p.
+- [Kairos](https://github.com/kairos-io/kairos) - A Kubernetes-focused, Cloud Native Linux meta-distribution.
+- [Oasis Core](https://github.com/oasisprotocol/oasis-core) - The consensus and runtime layers of the [Oasis protocol](https://oasisprotocol.org/).
+
+Please open a pull request if you want your project to be added here.
diff --git a/vendor/github.com/libp2p/go-libp2p/ROADMAP.md b/vendor/github.com/libp2p/go-libp2p/ROADMAP.md index 3d4fde81..5c9eb603 100644 --- a/vendor/github.com/libp2p/go-libp2p/ROADMAP.md +++ b/vendor/github.com/libp2p/go-libp2p/ROADMAP.md @@ -1,178 +1,5 @@ -# go-libp2p roadmap Q4’22/Q1’23 +# go-libp2p roadmap Q4’22/Q1’23 -``` -Date: 2022-10-20 -Status: Accepted -Notes: Internal go-libp2p stakeholders have aligned on this roadmap. Please add any feedback or questions in: -https://github.com/libp2p/go-libp2p/issues/1806 -``` +Please see our roadmap in [Starmap](https://starmap.site/roadmap/github.com/libp2p/go-libp2p/issues/1806#simple) -## Table of Contents - -- [About the Roadmap](#about-the-roadmap) - - [Vision](#vision) - - [Sections](#sections) - - [Done criteria](#done-criteria) -- [Benchmarking and Testing](#benchmarking-and-testing) -- [🛣️ Milestones](#️-milestones) - - [2022](#2022) - - [Early Q4 (October)](#early-q4-october) - - [Mid Q4 (November)](#mid-q4-november) - - [End of Q4 (December)](#end-of-q4-december) - - [2023](#2023) - - [Early Q1 (January)](#early-q1-january) - - [Mid Q1 (February)](#mid-q1-february) - - [End of Q1 (March)](#end-of-q1-march) - - [Up Next](#up-next) -- [📖 Appendix](#-appendix) - - [A. 📺 Universal Browser Connectivity](#a--universal-browser-connectivity) - - [1. WebRTC: Browser to Server](#1-webrtc-browser-to-server) - - [2. WebRTC: Browser to Browser](#2-webrtc-browser-to-browser) - - [3. WebTransport: Update to new draft versions](#3-webtransport-update-to-new-draft-versions) - - [B. ⚡️ Handshakes at the Speed of Light](#b-️-handshakes-at-the-speed-of-light) - - [1. Early Muxer Negotiation](#1-early-muxer-negotiation) - - [2. Adding security protocol](#2-adding-security-protocol) - - [3. 0.5 RTT data optimization](#3-05-rtt-data-optimization) - - [C. 🧠 Smart Dialing](#c--smart-dialing) - - [1. Happy Eyeballs](#1-happy-eyeballs) - - [2. QUIC Blackhole detector](#2-quic-blackhole-detector) - - [3. 
RTT estimation](#3-rtt-estimation) - - [D. 📊 Comprehensive Metrics](#d--comprehensive-metrics) - - [E. 📢 Judicious Address Advertisements](#e--judicious-address-advertisements) - -## About the Roadmap - -### Vision -We, the maintainers, are committed to upholding libp2p's shared core tenets and ensuring go-libp2p is: [**Secure, Stable, Specified, and Performant.**](https://github.com/libp2p/specs/blob/master/ROADMAP.md#core-tenets) - -Roadmap items in this document were sourced in part from the [overarching libp2p project roadmap.](https://github.com/libp2p/specs/blob/master/ROADMAP.md) - -### Sections -This document consists of two sections: [Milestones](#️-milestones) and the [Appendix](#-appendix) - -[Milestones](#️-milestones) is our best educated guess (not a hard commitment) around when we plan to ship the key features. -Where possible projects are broken down into discrete sub-projects e.g. project "A" may contain two sub-projects: A.1 and A.2 - -A project is signified as "complete" once all of it's sub-projects are shipped. - -The [Appendix](#-appendix) section describes a project's high-level motivation, goals, and lists sub-projects. - -Each Appendix header is linked to a GitHub Epic. Latest information on progress can be found in the Epics and child issues. - -### Done criteria -The "Definition of Done" for projects/sub-projects that involve writing new protocols/ modify existing ones usually consist of the following: -- If a specification change is required: - - [ ] Spec is merged and classified as "Candidate Recommendation" - - [ ] (by virtue of the above) At least one major reference implementation exists -- [ ] A well established testing criteria is met (defined at the outset of the project including but not limited to testing via Testground, compatibility tests with other implementations in the Release process, etc.) 
-- [ ] Public documentation (on docs.libp2p.io) exists - -Supporting projects (such as testing or benchmarking) may have different criteria. - -## Benchmarking and Testing -As mentioned in our [vision](#vision), performance and stability are core libp2p tenets. Rigorous benchmarking and testing help us uphold them. Related projects are listed in the [libp2p/test-plans roadmap](https://github.com/libp2p/test-plans/blob/master/ROADMAP.md) and the [testground/testground roadmap](https://github.com/testground/testground/blob/master/ROADMAP.md). -Our major priorities in Q4’22 and Q1’23 are: -- [interoperability testing](https://github.com/libp2p/test-plans/issues/53) (across implementations & versions and between transports, muxers, & security protocols) -- performance [benchmark go-libp2p using Testground](https://github.com/testground/testground/pull/1425) (create a benchmark suite to run in CI, create a public performance dashboard, [demonstrate libp2p is able to achieve performance on par with HTTP](https://github.com/libp2p/test-plans/issues/27)) - -These projects are parallel workstreams, weighed equally with roadmap items in this document. Some efforts like interoperability testing have a higher priority than implementation projects. The go-libp2p maintainers co-own these efforts with the js-libp2p, rust-libp2p, and Testground maintainers. 
- -[**Click here to see the shared Q4’22/Q1’23 testing and benchmarking priorities.**](https://github.com/libp2p/test-plans/blob/master/ROADMAP.md) - -## 🛣️ Milestones -### 2022 - -#### Early Q4 (October) -- [B.1 ⚡ Early Muxer Negotiation](#1-early-muxer-negotiation) - -#### Mid Q4 (November) -- [***➡️ test-plans/Interop tests for all existing/developing libp2p transports***](https://github.com/libp2p/test-plans/blob/master/ROADMAP.md#2-interop-test-plans-for-all-existingdeveloping-libp2p-transports) -- [***➡️ test-plans/Benchmarking using nix-builders***](https://github.com/libp2p/test-plans/blob/master/ROADMAP.md#1-benchmarking-using-nix-builders) - -#### End of Q4 (December) -- [A.1 📺 WebRTC Browser -> Server](#1-webrtc-for-browser-to-server) -- [C.1 🧠 Happy Eyeballs](#1-happy-eyeballs) -- [D 📊 Swarm Metrics](#e--comprehensive-metrics) - -### 2023 - -#### Early Q1 (January) -- [B.2 ⚡ Adding security protocol](#2-adding-security-protocol) -- [C.2 🧠 QUIC Blackhole detector](#2-quic-blackhole-detector) - -#### Mid Q1 (February) -- [C.3 🧠 RTT estimation](#3-rtt-estimation) - - 🎉 Estimated Project Completion - -#### End of Q1 (March) -- [B.3 ⚡ 0.5 RTT data optimization (for QUIC)](#3-05-rtt-data-optimization) - - 🎉 Estimated Project Completion -- [***➡️ test-plans/Benchmarking using remote runners***](https://github.com/libp2p/test-plans/blob/master/ROADMAP.md#2-benchmarking-using-remote-runners) - -### Up Next -- [A.2 📺 WebRTC: Browser to Browser](#2-webrtc-browser-to-browser) -- [A.3 📺 WebTransport: Update to new draft versions](#3-webtransport-update-to-new-draft-versions) -- [***➡️ test-plans/Expansive protocol test coverage***](https://github.com/libp2p/test-plans/blob/master/ROADMAP.md#d-expansive-protocol-test-coverage) -- [E 📢 Judicious Address Advertisements](#f--judicious-address-advertisements) - -## 📖 Appendix - -**Projects are listed in descending priority.** - -### [A. 
📺 Universal Browser Connectivity](https://github.com/libp2p/go-libp2p/issues/1811) - -**Why**: A huge part of “the Web” is happening inside the browser. As a universal p2p networking stack, libp2p needs to be able to offer solutions for browser users. - -**Goal**: go-libp2p ships with up-to-date WebTransport and (libp2p-) WebRTC implementations, enabled by default. This allows connections between browsers and public nodes, browsers and non-public nodes, as well as two browsers. - -#### 1. [WebRTC: Browser to Server](https://github.com/libp2p/go-libp2p/pull/1655) -Add support for WebRTC transport in go-libp2p, enabling browser connectivity with servers. This will cover the browsers that don't support WebTransport (most notable is iOS Safari). This is getting close to finalized. -#### 2. WebRTC: Browser to Browser -A follow up to A.1 where we will begin the work to specify the semantics of browser to browser connectivity and then implement it in go-libp2p. -#### 3. [WebTransport: Update to new draft versions](https://github.com/libp2p/go-libp2p/issues/1717) -As the protocol is still under development by IETF and W3C, the go-libp2p implementation needs to follow. We have a dependency on Chrome to support the new draft version of WebTransport protocol. To stay up to date, we will have to move as soon as Chrome ships supports the new draft version. - -### [B. ⚡️ Handshakes at the Speed of Light](https://github.com/libp2p/go-libp2p/issues/1807) - -**Why**: Historically, libp2p has been very wasteful when it comes to round trips spent during connection establishment. This is slowing down our users, especially their TTFB (time to first byte) metrics. - -**Goal**: go-libp2p optimizes its handshake latency up to the point where only increasing the speed of light would lead to further speedups. In particular, this means: - -#### 1. [Early Muxer Negotiation](https://github.com/libp2p/specs/issues/426) -Cutting off the 1 RTT wasted on muxer negotiation -#### 2. 
[Adding security protocol](https://github.com/libp2p/specs/pull/353) -Cutting off the 1 RTT wasted on security protocol negotiation by including the security protocol in the multiaddr -#### 3. 0.5 RTT data optimization -Using 0.5-RTT data (for TLS) / a Noise Extension to ship the list of Identify protocols, cutting of 1 RTT that many protocols spend waiting on `IdentifyWait` - -### [C. 🧠 Smart Dialing](https://github.com/libp2p/go-libp2p/issues/1808) - -**Why**: Having a large list of transports to pick from is great. Having an advanced stack that can dial all of them is even greater. But dialing all of them at the same time wastes our, the network’s and the peer’s resources. - -**Goal**: When given a list of multiaddrs of a peer, go-libp2p is smart enough to pick the address that results in the most performant connection (for example, preferring QUIC over TCP), while also picking the address such that maximizes the likelihood of a successful handshake. - -#### 1. [Happy Eyeballs](https://github.com/libp2p/go-libp2p/issues/1785) -Implement some kind of “Happy-Eyeballs” style prioritization among all supported transports -#### 2. QUIC Blackhole detector -Detection of blackholes, especially relevant to detect UDP (QUIC) blackholing -#### 3. RTT estimation -Estimation of the expected RTT of a connection based on two nodes’ IP addresses, so that Happy Eyeballs Timeouts can be set dynamically - -### [D. 📊 Comprehensive Metrics](https://github.com/libp2p/go-libp2p/issues/1356) - -**Why**: For far too long, go-libp2p has been a black box. This has hurt us many times, by allowing trivial bugs to go undetected for a long time ([example](https://github.com/ipfs/kubo/pull/8750)). Having metrics will allow us to track the impact of performance improvements we make over time. - -**Goal**: Export a wider set of metrics across go-libp2p components and enable node operators to monitor their nodes in production. 
Optionally provide a sample Grafana dashboard similar to the resource manager dashboard. - -**How**: This will look similar to how we already expose resource manager metrics. Metrics can be added incrementally for libp2p’s components. First milestone is having metrics for the swarm. - - -### [E. 📢 Judicious Address Advertisements](https://github.com/libp2p/go-libp2p/issues/1812) - -**Why**: A node that advertises lots of addresses hurts itself. Other nodes will have to try dialing a lot of addresses before they find one that actually works, dramatically increasing handshake latencies. - -**Goal**: Nodes only advertise addresses that they are actually reachable at. - -**How**: Unfortunately, the AutoNAT protocol can’t be used to probe the reachability of any particular address (especially due to a bug in the go-libp2p implementation deployed years ago). Most likely, we need a second version of the AutoNAT protocol. - -Related discussion: [https://github.com/libp2p/go-libp2p/issues/1480](https://github.com/libp2p/go-libp2p/issues/1480) +Please add any feedback or questions in: https://github.com/libp2p/go-libp2p/issues/1806 \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p/SECURITY.md b/vendor/github.com/libp2p/go-libp2p/SECURITY.md new file mode 100644 index 00000000..0ecad430 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/SECURITY.md @@ -0,0 +1,20 @@ +# Security Policy + +go-libp2p is still in development. This means that there may be problems in our protocols, +or there may be mistakes in our implementations. +We take security vulnerabilities very seriously. If you discover a security issue, +please bring it to our attention right away! + +## Reporting a Vulnerability + +If you find a vulnerability that may affect live deployments -- for example, by exposing +a remote execution exploit -- please [**report privately**](https://github.com/libp2p/go-libp2p/security/advisories/new). +Please **DO NOT file a public issue**. 
+ +If the issue is an implementation weakness that cannot be immediately exploited or +something not yet deployed, just discuss it openly. +If you need assistance, please reach out to [security@libp2p.io](mailto:security@libp2p.io). + +## Reporting a non security bug + +For non-security bugs, please simply file a GitHub [issue](https://github.com/libp2p/go-libp2p/issues/new). diff --git a/vendor/github.com/libp2p/go-libp2p/config/config.go b/vendor/github.com/libp2p/go-libp2p/config/config.go index 94e18be2..a3bd86f2 100644 --- a/vendor/github.com/libp2p/go-libp2p/config/config.go +++ b/vendor/github.com/libp2p/go-libp2p/config/config.go @@ -8,6 +8,7 @@ import ( "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/metrics" "github.com/libp2p/go-libp2p/core/network" @@ -23,6 +24,7 @@ import ( "github.com/libp2p/go-libp2p/p2p/host/autorelay" bhost "github.com/libp2p/go-libp2p/p2p/host/basic" blankhost "github.com/libp2p/go-libp2p/p2p/host/blank" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" routed "github.com/libp2p/go-libp2p/p2p/host/routed" "github.com/libp2p/go-libp2p/p2p/net/swarm" @@ -31,6 +33,7 @@ import ( relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" "github.com/libp2p/go-libp2p/p2p/protocol/holepunch" "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + "github.com/prometheus/client_golang/prometheus" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" @@ -117,9 +120,12 @@ type Config struct { EnableHolePunching bool HolePunchingOptions []holepunch.Option + + DisableMetrics bool + PrometheusRegisterer prometheus.Registerer } -func (cfg *Config) makeSwarm() (*swarm.Swarm, error) { +func (cfg *Config) makeSwarm(eventBus event.Bus, enableMetrics bool) (*swarm.Swarm, error) { if cfg.Peerstore 
== nil { return nil, fmt.Errorf("no peerstore specified") } @@ -151,7 +157,7 @@ func (cfg *Config) makeSwarm() (*swarm.Swarm, error) { return nil, err } - opts := make([]swarm.Option, 0, 3) + opts := make([]swarm.Option, 0, 6) if cfg.Reporter != nil { opts = append(opts, swarm.WithMetrics(cfg.Reporter)) } @@ -167,8 +173,12 @@ func (cfg *Config) makeSwarm() (*swarm.Swarm, error) { if cfg.MultiaddrResolver != nil { opts = append(opts, swarm.WithMultiaddrResolver(cfg.MultiaddrResolver)) } + if enableMetrics { + opts = append(opts, + swarm.WithMetricsTracer(swarm.NewMetricsTracer(swarm.WithRegisterer(cfg.PrometheusRegisterer)))) + } // TODO: Make the swarm implementation configurable. - return swarm.NewSwarm(pid, cfg.Peerstore, opts...) + return swarm.NewSwarm(pid, cfg.Peerstore, eventBus, opts...) } func (cfg *Config) addTransports(h host.Host) error { @@ -276,22 +286,26 @@ func (cfg *Config) addTransports(h host.Host) error { // // This function consumes the config. Do not reuse it (really!). 
func (cfg *Config) NewNode() (host.Host, error) { - swrm, err := cfg.makeSwarm() + eventBus := eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer(eventbus.WithRegisterer(cfg.PrometheusRegisterer)))) + swrm, err := cfg.makeSwarm(eventBus, !cfg.DisableMetrics) if err != nil { return nil, err } h, err := bhost.NewHost(swrm, &bhost.HostOpts{ - ConnManager: cfg.ConnManager, - AddrsFactory: cfg.AddrsFactory, - NATManager: cfg.NATManager, - EnablePing: !cfg.DisablePing, - UserAgent: cfg.UserAgent, - ProtocolVersion: cfg.ProtocolVersion, - EnableHolePunching: cfg.EnableHolePunching, - HolePunchingOptions: cfg.HolePunchingOptions, - EnableRelayService: cfg.EnableRelayService, - RelayServiceOpts: cfg.RelayServiceOpts, + EventBus: eventBus, + ConnManager: cfg.ConnManager, + AddrsFactory: cfg.AddrsFactory, + NATManager: cfg.NATManager, + EnablePing: !cfg.DisablePing, + UserAgent: cfg.UserAgent, + ProtocolVersion: cfg.ProtocolVersion, + EnableHolePunching: cfg.EnableHolePunching, + HolePunchingOptions: cfg.HolePunchingOptions, + EnableRelayService: cfg.EnableRelayService, + RelayServiceOpts: cfg.RelayServiceOpts, + EnableMetrics: !cfg.DisableMetrics, + PrometheusRegisterer: cfg.PrometheusRegisterer, }) if err != nil { swrm.Close() @@ -340,6 +354,12 @@ func (cfg *Config) NewNode() (host.Host, error) { h.Close() return nil, fmt.Errorf("cannot enable autorelay; relay is not enabled") } + if !cfg.DisableMetrics { + mt := autorelay.WithMetricsTracer( + autorelay.NewMetricsTracer(autorelay.WithRegisterer(cfg.PrometheusRegisterer))) + mtOpts := []autorelay.Option{mt} + cfg.AutoRelayOpts = append(mtOpts, cfg.AutoRelayOpts...) + } ar, err = autorelay.NewAutoRelay(h, cfg.AutoRelayOpts...) 
if err != nil { @@ -352,6 +372,11 @@ func (cfg *Config) NewNode() (host.Host, error) { return addrF(h.AllAddrs()) }), } + if !cfg.DisableMetrics { + autonatOpts = append(autonatOpts, + autonat.WithMetricsTracer( + autonat.NewMetricsTracer(autonat.WithRegisterer(cfg.PrometheusRegisterer)))) + } if cfg.AutoNATConfig.ThrottleInterval != 0 { autonatOpts = append(autonatOpts, autonat.WithThrottling(cfg.AutoNATConfig.ThrottleGlobalLimit, cfg.AutoNATConfig.ThrottleInterval), @@ -382,7 +407,7 @@ func (cfg *Config) NewNode() (host.Host, error) { Peerstore: ps, } - dialer, err := autoNatCfg.makeSwarm() + dialer, err := autoNatCfg.makeSwarm(eventbus.NewBus(), false) if err != nil { h.Close() return nil, err @@ -418,7 +443,9 @@ func (cfg *Config) NewNode() (host.Host, error) { ho = routed.Wrap(h, router) } if ar != nil { - return autorelay.NewAutoRelayHost(ho, ar), nil + arh := autorelay.NewAutoRelayHost(ho, ar) + arh.Start() + ho = arh } return ho, nil } diff --git a/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go b/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go index 3cbb6970..a12be56f 100644 --- a/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go +++ b/vendor/github.com/libp2p/go-libp2p/config/quic_stateless_reset.go @@ -8,7 +8,7 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" - "github.com/lucas-clemente/quic-go" + "github.com/quic-go/quic-go" ) const statelessResetKeyInfo = "libp2p quic stateless reset key" diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go index 3d7b39a2..9133141c 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go +++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/key.go @@ -12,11 +12,13 @@ import ( "fmt" "io" - pb "github.com/libp2p/go-libp2p/core/crypto/pb" + "github.com/libp2p/go-libp2p/core/crypto/pb" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" ) +//go:generate protoc 
--go_out=. --go_opt=Mpb/crypto.proto=./pb pb/crypto.proto + const ( // RSA is an enum for the supported RSA key type RSA = iota @@ -194,7 +196,7 @@ func PublicKeyFromProto(pmes *pb.PublicKey) (PubKey, error) { switch tpk := pk.(type) { case *RsaPublicKey: - tpk.cached, _ = pmes.Marshal() + tpk.cached, _ = proto.Marshal(pmes) } return pk, nil @@ -214,14 +216,14 @@ func MarshalPublicKey(k PubKey) ([]byte, error) { // PublicKeyToProto converts a public key object into an unserialized // protobuf PublicKey message. func PublicKeyToProto(k PubKey) (*pb.PublicKey, error) { - pbmes := new(pb.PublicKey) - pbmes.Type = k.Type() data, err := k.Raw() if err != nil { return nil, err } - pbmes.Data = data - return pbmes, nil + return &pb.PublicKey{ + Type: k.Type().Enum(), + Data: data, + }, nil } // UnmarshalPrivateKey converts a protobuf serialized private key into its @@ -243,15 +245,14 @@ func UnmarshalPrivateKey(data []byte) (PrivKey, error) { // MarshalPrivateKey converts a key object into its protobuf serialized form. func MarshalPrivateKey(k PrivKey) ([]byte, error) { - pbmes := new(pb.PrivateKey) - pbmes.Type = k.Type() data, err := k.Raw() if err != nil { return nil, err } - - pbmes.Data = data - return proto.Marshal(pbmes) + return proto.Marshal(&pb.PrivateKey{ + Type: k.Type().Enum(), + Data: data, + }) } // ConfigDecodeKey decodes from b64 (for config file) to a byte array that can be unmarshalled. 
diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go deleted file mode 100644 index 7a13ff69..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_openssl.go +++ /dev/null @@ -1,101 +0,0 @@ -//go:build openssl -// +build openssl - -package crypto - -import ( - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/rsa" - "crypto/x509" - - "github.com/libp2p/go-libp2p/core/internal/catch" - - "github.com/decred/dcrd/dcrec/secp256k1/v4" - "github.com/libp2p/go-openssl" -) - -// KeyPairFromStdKey wraps standard library (and secp256k1) private keys in libp2p/go-libp2p/core/crypto keys -func KeyPairFromStdKey(priv crypto.PrivateKey) (_priv PrivKey, _pub PubKey, err error) { - if priv == nil { - return nil, nil, ErrNilPrivateKey - } - - switch p := priv.(type) { - case *rsa.PrivateKey: - defer func() { catch.HandlePanic(recover(), &err, "x509 private key marshaling") }() - pk, err := openssl.LoadPrivateKeyFromDER(x509.MarshalPKCS1PrivateKey(p)) - if err != nil { - return nil, nil, err - } - - return &opensslPrivateKey{pk}, &opensslPublicKey{key: pk}, nil - - case *ecdsa.PrivateKey: - return &ECDSAPrivateKey{p}, &ECDSAPublicKey{&p.PublicKey}, nil - - case *ed25519.PrivateKey: - pubIfc := p.Public() - pub, _ := pubIfc.(ed25519.PublicKey) - return &Ed25519PrivateKey{*p}, &Ed25519PublicKey{pub}, nil - - case *secp256k1.PrivateKey: - sPriv := Secp256k1PrivateKey(*p) - sPub := Secp256k1PublicKey(*p.PubKey()) - return &sPriv, &sPub, nil - - default: - return nil, nil, ErrBadKeyType - } -} - -// PrivKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) private keys -func PrivKeyToStdKey(priv PrivKey) (_priv crypto.PrivateKey, err error) { - if priv == nil { - return nil, ErrNilPrivateKey - } - switch p := priv.(type) { - case *opensslPrivateKey: - defer func() { catch.HandlePanic(recover(), &err, "x509 private key parsing") }() - raw, 
err := p.Raw() - if err != nil { - return nil, err - } - return x509.ParsePKCS1PrivateKey(raw) - case *ECDSAPrivateKey: - return p.priv, nil - case *Ed25519PrivateKey: - return &p.k, nil - case *Secp256k1PrivateKey: - return p, nil - default: - return nil, ErrBadKeyType - } -} - -// PubKeyToStdKey converts libp2p/go-libp2p/core/crypto private keys to standard library (and secp256k1) public keys -func PubKeyToStdKey(pub PubKey) (key crypto.PublicKey, err error) { - if pub == nil { - return nil, ErrNilPublicKey - } - - switch p := pub.(type) { - case *opensslPublicKey: - defer func() { catch.HandlePanic(recover(), &err, "x509 public key parsing") }() - - raw, err := p.Raw() - if err != nil { - return nil, err - } - return x509.ParsePKIXPublicKey(raw) - case *ECDSAPublicKey: - return p.pub, nil - case *Ed25519PublicKey: - return p.k, nil - case *Secp256k1PublicKey: - return p, nil - default: - return nil, ErrBadKeyType - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_not_openssl.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go similarity index 97% rename from vendor/github.com/libp2p/go-libp2p/core/crypto/key_not_openssl.go rename to vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go index 00324675..aead1d25 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/key_not_openssl.go +++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/key_to_stdlib.go @@ -1,6 +1,3 @@ -//go:build !openssl -// +build !openssl - package crypto import ( diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go deleted file mode 100644 index d97eb08b..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/openssl_common.go +++ /dev/null @@ -1,104 +0,0 @@ -//go:build openssl -// +build openssl - -package crypto - -import ( - "sync" - - pb "github.com/libp2p/go-libp2p/core/crypto/pb" - - "github.com/libp2p/go-openssl" -) - -// define these as 
separate types so we can add more key types later and reuse -// code. - -type opensslPublicKey struct { - key openssl.PublicKey - - cacheLk sync.Mutex - cached []byte -} - -type opensslPrivateKey struct { - key openssl.PrivateKey -} - -func unmarshalOpensslPrivateKey(b []byte) (opensslPrivateKey, error) { - sk, err := openssl.LoadPrivateKeyFromDER(b) - if err != nil { - return opensslPrivateKey{}, err - } - return opensslPrivateKey{sk}, nil -} - -func unmarshalOpensslPublicKey(b []byte) (opensslPublicKey, error) { - sk, err := openssl.LoadPublicKeyFromDER(b) - if err != nil { - return opensslPublicKey{}, err - } - return opensslPublicKey{key: sk, cached: b}, nil -} - -// Verify compares a signature against input data -func (pk *opensslPublicKey) Verify(data, sig []byte) (bool, error) { - err := pk.key.VerifyPKCS1v15(openssl.SHA256_Method, data, sig) - return err == nil, err -} - -func (pk *opensslPublicKey) Type() pb.KeyType { - switch pk.key.KeyType() { - case openssl.KeyTypeRSA: - return pb.KeyType_RSA - default: - return -1 - } -} - -func (pk *opensslPublicKey) Raw() ([]byte, error) { - return pk.key.MarshalPKIXPublicKeyDER() -} - -// Equals checks whether this key is equal to another -func (pk *opensslPublicKey) Equals(k Key) bool { - k0, ok := k.(*RsaPublicKey) - if !ok { - return basicEquals(pk, k) - } - - return pk.key.Equal(k0.opensslPublicKey.key) -} - -// Sign returns a signature of the input data -func (sk *opensslPrivateKey) Sign(message []byte) ([]byte, error) { - return sk.key.SignPKCS1v15(openssl.SHA256_Method, message) -} - -// GetPublic returns a public key -func (sk *opensslPrivateKey) GetPublic() PubKey { - return &opensslPublicKey{key: sk.key} -} - -func (sk *opensslPrivateKey) Type() pb.KeyType { - switch sk.key.KeyType() { - case openssl.KeyTypeRSA: - return pb.KeyType_RSA - default: - return -1 - } -} - -func (sk *opensslPrivateKey) Raw() ([]byte, error) { - return sk.key.MarshalPKCS1PrivateKeyDER() -} - -// Equals checks whether this key is 
equal to another -func (sk *opensslPrivateKey) Equals(k Key) bool { - k0, ok := k.(*RsaPrivateKey) - if !ok { - return basicEquals(sk, k) - } - - return sk.key.Equal(k0.opensslPrivateKey.key) -} diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile deleted file mode 100644 index 8af2dd81..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(PWD)/../..:. --gogofaster_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go index 072fad9c..0b406794 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/pb/crypto.pb.go @@ -1,27 +1,24 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: crypto.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/crypto.proto -package crypto_pb +package pb import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type KeyType int32 @@ -32,19 +29,21 @@ const ( KeyType_ECDSA KeyType = 3 ) -var KeyType_name = map[int32]string{ - 0: "RSA", - 1: "Ed25519", - 2: "Secp256k1", - 3: "ECDSA", -} - -var KeyType_value = map[string]int32{ - "RSA": 0, - "Ed25519": 1, - "Secp256k1": 2, - "ECDSA": 3, -} +// Enum value maps for KeyType. +var ( + KeyType_name = map[int32]string{ + 0: "RSA", + 1: "Ed25519", + 2: "Secp256k1", + 3: "ECDSA", + } + KeyType_value = map[string]int32{ + "RSA": 0, + "Ed25519": 1, + "Secp256k1": 2, + "ECDSA": 3, + } +) func (x KeyType) Enum() *KeyType { p := new(KeyType) @@ -53,573 +52,246 @@ func (x KeyType) Enum() *KeyType { } func (x KeyType) String() string { - return proto.EnumName(KeyType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (KeyType) Descriptor() protoreflect.EnumDescriptor { + return file_pb_crypto_proto_enumTypes[0].Descriptor() +} + +func (KeyType) Type() protoreflect.EnumType { + return &file_pb_crypto_proto_enumTypes[0] +} + +func (x KeyType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -func (x *KeyType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(KeyType_value, data, "KeyType") +// Deprecated: Do not use. +func (x *KeyType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = KeyType(value) + *x = KeyType(num) return nil } +// Deprecated: Use KeyType.Descriptor instead. 
func (KeyType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_527278fb02d03321, []int{0} + return file_pb_crypto_proto_rawDescGZIP(), []int{0} } type PublicKey struct { - Type KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type"` - Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PublicKey) Reset() { *m = PublicKey{} } -func (m *PublicKey) String() string { return proto.CompactTextString(m) } -func (*PublicKey) ProtoMessage() {} -func (*PublicKey) Descriptor() ([]byte, []int) { - return fileDescriptor_527278fb02d03321, []int{0} -} -func (m *PublicKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PublicKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PublicKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PublicKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublicKey.Merge(m, src) -} -func (m *PublicKey) XXX_Size() int { - return m.Size() -} -func (m *PublicKey) XXX_DiscardUnknown() { - xxx_messageInfo_PublicKey.DiscardUnknown(m) + Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"` } -var xxx_messageInfo_PublicKey proto.InternalMessageInfo - -func (m *PublicKey) GetType() KeyType { - if m != nil { - return m.Type +func (x *PublicKey) Reset() { + *x = PublicKey{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_crypto_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return KeyType_RSA } -func (m *PublicKey) GetData() []byte { - if m != nil { - return m.Data - } - return nil +func (x *PublicKey) String() string { + return 
protoimpl.X.MessageStringOf(x) } -type PrivateKey struct { - Type KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type"` - Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data"` -} +func (*PublicKey) ProtoMessage() {} -func (m *PrivateKey) Reset() { *m = PrivateKey{} } -func (m *PrivateKey) String() string { return proto.CompactTextString(m) } -func (*PrivateKey) ProtoMessage() {} -func (*PrivateKey) Descriptor() ([]byte, []int) { - return fileDescriptor_527278fb02d03321, []int{1} -} -func (m *PrivateKey) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrivateKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrivateKey.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *PublicKey) ProtoReflect() protoreflect.Message { + mi := &file_pb_crypto_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *PrivateKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrivateKey.Merge(m, src) -} -func (m *PrivateKey) XXX_Size() int { - return m.Size() -} -func (m *PrivateKey) XXX_DiscardUnknown() { - xxx_messageInfo_PrivateKey.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_PrivateKey proto.InternalMessageInfo +// Deprecated: Use PublicKey.ProtoReflect.Descriptor instead. 
+func (*PublicKey) Descriptor() ([]byte, []int) { + return file_pb_crypto_proto_rawDescGZIP(), []int{0} +} -func (m *PrivateKey) GetType() KeyType { - if m != nil { - return m.Type +func (x *PublicKey) GetType() KeyType { + if x != nil && x.Type != nil { + return *x.Type } return KeyType_RSA } -func (m *PrivateKey) GetData() []byte { - if m != nil { - return m.Data +func (x *PublicKey) GetData() []byte { + if x != nil { + return x.Data } return nil } -func init() { - proto.RegisterEnum("crypto.pb.KeyType", KeyType_name, KeyType_value) - proto.RegisterType((*PublicKey)(nil), "crypto.pb.PublicKey") - proto.RegisterType((*PrivateKey)(nil), "crypto.pb.PrivateKey") -} +type PrivateKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func init() { proto.RegisterFile("crypto.proto", fileDescriptor_527278fb02d03321) } - -var fileDescriptor_527278fb02d03321 = []byte{ - // 203 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x2e, 0xaa, 0x2c, - 0x28, 0xc9, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x84, 0xf1, 0x92, 0x94, 0x82, 0xb9, - 0x38, 0x03, 0x4a, 0x93, 0x72, 0x32, 0x93, 0xbd, 0x53, 0x2b, 0x85, 0x74, 0xb8, 0x58, 0x42, 0x2a, - 0x0b, 0x52, 0x25, 0x18, 0x15, 0x98, 0x34, 0xf8, 0x8c, 0x84, 0xf4, 0xe0, 0xca, 0xf4, 0xbc, 0x53, - 0x2b, 0x41, 0x32, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x81, 0x55, 0x09, 0x49, 0x70, 0xb1, - 0xb8, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x29, 0x30, 0x69, 0xf0, 0xc0, 0x64, 0x40, 0x22, 0x4a, 0x21, - 0x5c, 0x5c, 0x01, 0x45, 0x99, 0x65, 0x89, 0x25, 0xa9, 0x54, 0x34, 0x55, 0xcb, 0x92, 0x8b, 0x1d, - 0xaa, 0x41, 0x88, 0x9d, 0x8b, 0x39, 0x28, 0xd8, 0x51, 0x80, 0x41, 0x88, 0x9b, 0x8b, 0xdd, 0x35, - 0xc5, 0xc8, 0xd4, 0xd4, 0xd0, 0x52, 0x80, 0x51, 0x88, 0x97, 0x8b, 0x33, 0x38, 0x35, 0xb9, 0xc0, - 0xc8, 0xd4, 0x2c, 0xdb, 0x50, 0x80, 0x49, 0x88, 0x93, 0x8b, 0xd5, 0xd5, 0xd9, 0x25, 0xd8, 0x51, - 0x80, 0xd9, 0x49, 0xe2, 0xc4, 
0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, - 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x13, 0xbe, 0xd4, 0xff, 0x19, 0x01, 0x00, 0x00, + Type *KeyType `protobuf:"varint,1,req,name=Type,enum=crypto.pb.KeyType" json:"Type,omitempty"` + Data []byte `protobuf:"bytes,2,req,name=Data" json:"Data,omitempty"` } -func (m *PublicKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *PrivateKey) Reset() { + *x = PrivateKey{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_crypto_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PublicKey) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *PrivateKey) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PublicKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintCrypto(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - i = encodeVarintCrypto(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} +func (*PrivateKey) ProtoMessage() {} -func (m *PrivateKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *PrivateKey) ProtoReflect() protoreflect.Message { + mi := &file_pb_crypto_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil -} - -func (m *PrivateKey) MarshalTo(dAtA []byte) (int, error) { - size 
:= m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return mi.MessageOf(x) } -func (m *PrivateKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Data != nil { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintCrypto(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - i = encodeVarintCrypto(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil +// Deprecated: Use PrivateKey.ProtoReflect.Descriptor instead. +func (*PrivateKey) Descriptor() ([]byte, []int) { + return file_pb_crypto_proto_rawDescGZIP(), []int{1} } -func encodeVarintCrypto(dAtA []byte, offset int, v uint64) int { - offset -= sovCrypto(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PublicKey) Size() (n int) { - if m == nil { - return 0 +func (x *PrivateKey) GetType() KeyType { + if x != nil && x.Type != nil { + return *x.Type } - var l int - _ = l - n += 1 + sovCrypto(uint64(m.Type)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovCrypto(uint64(l)) - } - return n + return KeyType_RSA } -func (m *PrivateKey) Size() (n int) { - if m == nil { - return 0 +func (x *PrivateKey) GetData() []byte { + if x != nil { + return x.Data } - var l int - _ = l - n += 1 + sovCrypto(uint64(m.Type)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovCrypto(uint64(l)) - } - return n + return nil } -func sovCrypto(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +var File_pb_crypto_proto protoreflect.FileDescriptor + +var file_pb_crypto_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x70, 0x62, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x09, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x22, 0x47, 0x0a, 0x09, + 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 
0x32, 0x12, 0x2e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, + 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0c, 0x52, + 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x48, 0x0a, 0x0a, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, + 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x4b, 0x65, + 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, + 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x02, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x2a, + 0x39, 0x0a, 0x07, 0x4b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x53, + 0x41, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, 0x64, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x70, 0x32, 0x35, 0x36, 0x6b, 0x31, 0x10, 0x02, 0x12, + 0x09, 0x0a, 0x05, 0x45, 0x43, 0x44, 0x53, 0x41, 0x10, 0x03, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, + 0x67, 0x6f, 0x2d, 0x6c, 0x69, 0x62, 0x70, 0x32, 0x70, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f, 0x70, 0x62, } -func sozCrypto(x uint64) (n int) { - return sovCrypto(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PublicKey) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCrypto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
PublicKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PublicKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCrypto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= KeyType(b&0x7F) << shift - if b < 0x80 { - break - } - } - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCrypto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCrypto - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCrypto - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - default: - iNdEx = preIndex - skippy, err := skipCrypto(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCrypto - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCrypto - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Type") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Data") - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrivateKey) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCrypto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrivateKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrivateKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCrypto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= KeyType(b&0x7F) << shift - if b < 0x80 { - break - } - } - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowCrypto - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCrypto - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCrypto - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - default: - iNdEx = preIndex - skippy, err := skipCrypto(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCrypto - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCrypto - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Type") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Data") - } +var ( + file_pb_crypto_proto_rawDescOnce sync.Once + file_pb_crypto_proto_rawDescData = file_pb_crypto_proto_rawDesc +) - if iNdEx > l { - return io.ErrUnexpectedEOF +func file_pb_crypto_proto_rawDescGZIP() []byte { + file_pb_crypto_proto_rawDescOnce.Do(func() { + file_pb_crypto_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_crypto_proto_rawDescData) + }) + return file_pb_crypto_proto_rawDescData +} + +var file_pb_crypto_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_pb_crypto_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pb_crypto_proto_goTypes = []interface{}{ + (KeyType)(0), // 0: crypto.pb.KeyType + (*PublicKey)(nil), // 1: crypto.pb.PublicKey + (*PrivateKey)(nil), // 2: crypto.pb.PrivateKey +} +var file_pb_crypto_proto_depIdxs = []int32{ + 0, // 0: crypto.pb.PublicKey.Type:type_name -> crypto.pb.KeyType + 0, // 1: crypto.pb.PrivateKey.Type:type_name -> 
crypto.pb.KeyType + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pb_crypto_proto_init() } +func file_pb_crypto_proto_init() { + if File_pb_crypto_proto != nil { + return } - return nil -} -func skipCrypto(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCrypto - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + if !protoimpl.UnsafeEnabled { + file_pb_crypto_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublicKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCrypto - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCrypto - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + file_pb_crypto_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrivateKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - if length < 0 { - return 0, ErrInvalidLengthCrypto - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - 
return 0, ErrUnexpectedEndOfGroupCrypto - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCrypto - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_crypto_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_crypto_proto_goTypes, + DependencyIndexes: file_pb_crypto_proto_depIdxs, + EnumInfos: file_pb_crypto_proto_enumTypes, + MessageInfos: file_pb_crypto_proto_msgTypes, + }.Build() + File_pb_crypto_proto = out.File + file_pb_crypto_proto_rawDesc = nil + file_pb_crypto_proto_goTypes = nil + file_pb_crypto_proto_depIdxs = nil } - -var ( - ErrInvalidLengthCrypto = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCrypto = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCrypto = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go index c7e30543..2b05eb6a 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go +++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_common.go @@ -12,9 +12,12 @@ const WeakRsaKeyEnv = "LIBP2P_ALLOW_WEAK_RSA_KEYS" var MinRsaKeyBits = 2048 +var maxRsaKeyBits = 8192 + // ErrRsaKeyTooSmall is returned when trying to generate or parse an RSA key // that's smaller than MinRsaKeyBits bits. 
In test var ErrRsaKeyTooSmall error +var ErrRsaKeyTooBig error = fmt.Errorf("rsa keys must be <= %d bits", maxRsaKeyBits) func init() { if _, ok := os.LookupEnv(WeakRsaKeyEnv); ok { diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go index 1324447d..f1539309 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go +++ b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_go.go @@ -1,6 +1,3 @@ -//go:build !openssl -// +build !openssl - package crypto import ( @@ -34,6 +31,9 @@ func GenerateRSAKeyPair(bits int, src io.Reader) (PrivKey, PubKey, error) { if bits < MinRsaKeyBits { return nil, nil, ErrRsaKeyTooSmall } + if bits > maxRsaKeyBits { + return nil, nil, ErrRsaKeyTooBig + } priv, err := rsa.GenerateKey(src, bits) if err != nil { return nil, nil, err @@ -127,6 +127,9 @@ func UnmarshalRsaPrivateKey(b []byte) (key PrivKey, err error) { if sk.N.BitLen() < MinRsaKeyBits { return nil, ErrRsaKeyTooSmall } + if sk.N.BitLen() > maxRsaKeyBits { + return nil, ErrRsaKeyTooBig + } return &RsaPrivateKey{sk: *sk}, nil } @@ -144,6 +147,9 @@ func UnmarshalRsaPublicKey(b []byte) (key PubKey, err error) { if pk.N.BitLen() < MinRsaKeyBits { return nil, ErrRsaKeyTooSmall } + if pk.N.BitLen() > maxRsaKeyBits { + return nil, ErrRsaKeyTooBig + } return &RsaPublicKey{k: *pk}, nil } diff --git a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go b/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go deleted file mode 100644 index 4e8269ff..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/crypto/rsa_openssl.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build openssl -// +build openssl - -package crypto - -import ( - "errors" - "io" - - openssl "github.com/libp2p/go-openssl" -) - -// RsaPrivateKey is an rsa private key -type RsaPrivateKey struct { - opensslPrivateKey -} - -// RsaPublicKey is an rsa public key -type RsaPublicKey struct { - opensslPublicKey -} - -// 
GenerateRSAKeyPair generates a new rsa private and public key -func GenerateRSAKeyPair(bits int, _ io.Reader) (PrivKey, PubKey, error) { - if bits < MinRsaKeyBits { - return nil, nil, ErrRsaKeyTooSmall - } - - key, err := openssl.GenerateRSAKey(bits) - if err != nil { - return nil, nil, err - } - return &RsaPrivateKey{opensslPrivateKey{key}}, &RsaPublicKey{opensslPublicKey{key: key}}, nil -} - -// GetPublic returns a public key -func (sk *RsaPrivateKey) GetPublic() PubKey { - return &RsaPublicKey{opensslPublicKey{key: sk.opensslPrivateKey.key}} -} - -// UnmarshalRsaPrivateKey returns a private key from the input x509 bytes -func UnmarshalRsaPrivateKey(b []byte) (PrivKey, error) { - key, err := unmarshalOpensslPrivateKey(b) - if err != nil { - return nil, err - } - if 8*key.key.Size() < MinRsaKeyBits { - return nil, ErrRsaKeyTooSmall - } - if key.Type() != RSA { - return nil, errors.New("not actually an rsa public key") - } - return &RsaPrivateKey{key}, nil -} - -// UnmarshalRsaPublicKey returns a public key from the input x509 bytes -func UnmarshalRsaPublicKey(b []byte) (PubKey, error) { - key, err := unmarshalOpensslPublicKey(b) - if err != nil { - return nil, err - } - if 8*key.key.Size() < MinRsaKeyBits { - return nil, ErrRsaKeyTooSmall - } - if key.Type() != RSA { - return nil, errors.New("not actually an rsa public key") - } - return &RsaPublicKey{key}, nil -} diff --git a/vendor/github.com/libp2p/go-libp2p/core/event/bus.go b/vendor/github.com/libp2p/go-libp2p/core/event/bus.go index 0cd8d2ff..13e18e53 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/event/bus.go +++ b/vendor/github.com/libp2p/go-libp2p/core/event/bus.go @@ -39,6 +39,9 @@ type Subscription interface { // Out returns the channel from which to consume events. Out() <-chan interface{} + + // Name returns the name for the subscription + Name() string } // Bus is an interface for a type-based event delivery system. 
diff --git a/vendor/github.com/libp2p/go-libp2p/core/host/host.go b/vendor/github.com/libp2p/go-libp2p/core/host/host.go index cfea91e5..e62be281 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/host/host.go +++ b/vendor/github.com/libp2p/go-libp2p/core/host/host.go @@ -8,7 +8,6 @@ import ( "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/introspection" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" @@ -53,7 +52,7 @@ type Host interface { // SetStreamHandlerMatch sets the protocol handler on the Host's Mux // using a matching function for protocol selection. - SetStreamHandlerMatch(protocol.ID, func(string) bool, network.StreamHandler) + SetStreamHandlerMatch(protocol.ID, func(protocol.ID) bool, network.StreamHandler) // RemoveStreamHandler removes a handler on the mux that was set by // SetStreamHandler @@ -74,16 +73,3 @@ type Host interface { // EventBus returns the hosts eventbus EventBus() event.Bus } - -// IntrospectableHost is implemented by Host implementations that are -// introspectable, that is, that may have introspection capability. -type IntrospectableHost interface { - // Introspector returns the introspector, or nil if one hasn't been - // registered. With it, the call can register data providers, and can fetch - // introspection data. - Introspector() introspection.Introspector - - // IntrospectionEndpoint returns the introspection endpoint, or nil if one - // hasn't been registered. - IntrospectionEndpoint() introspection.Endpoint -} diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go deleted file mode 100644 index 302c23f4..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/introspection/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package introspection is EXPERIMENTAL. 
It is subject to heavy change, and it -// WILL change. For now, it is the simplest implementation to power the -// proof-of-concept of the libp2p introspection framework. -// -// Package introspect contains the abstract skeleton of the introspection system -// of go-libp2p, and holds the introspection data schema. -package introspection diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go deleted file mode 100644 index 51596a46..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/introspection/endpoint.go +++ /dev/null @@ -1,30 +0,0 @@ -package introspection - -// Endpoint is the interface to be implemented by introspection endpoints. -// -// An introspection endpoint makes introspection data accessible to external -// consumers, over, for example, WebSockets, or TCP, or libp2p itself. -// -// Experimental. -type Endpoint interface { - // Start starts the introspection endpoint. It must only be called once, and - // once the server is started, subsequent calls made without first calling - // Close will error. - Start() error - - // Close stops the introspection endpoint. Calls to Close on an already - // closed endpoint (or an unstarted endpoint) must noop. - Close() error - - // ListenAddrs returns the listen addresses of this endpoint. - ListenAddrs() []string - - // Sessions returns the ongoing sessions of this endpoint. - Sessions() []*Session -} - -// Session represents an introspection session. -type Session struct { - // RemoteAddr is the remote address of the session. 
- RemoteAddr string -} diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go deleted file mode 100644 index e39f9673..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/introspection/introspector.go +++ /dev/null @@ -1,39 +0,0 @@ -package introspection - -import ( - "io" - - "github.com/libp2p/go-libp2p/core/introspection/pb" -) - -// Introspector is the interface to be satisfied by components that are capable -// of spelunking the state of the system, and representing in accordance with -// the introspection schema. -// -// It's very rare to build a custom implementation of this interface; -// it exists mostly for mocking. In most cases, you'll end up using the -// default introspector. -// -// Introspector implementations are usually injected in introspection endpoints -// to serve the data to clients, but they can also be used separately for -// embedding or testing. -// -// Experimental. -type Introspector interface { - io.Closer - - // FetchRuntime returns the runtime information of the system. - FetchRuntime() (*pb.Runtime, error) - - // FetchFullState returns the full state cross-cut of the running system. - FetchFullState() (*pb.State, error) - - // EventChan returns the channel where all eventbus events are dumped, - // decorated with their corresponding event metadata, ready to send over - // the wire. - EventChan() <-chan *pb.Event - - // EventMetadata returns the metadata of all events known to the - // Introspector. 
- EventMetadata() []*pb.EventType -} diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile deleted file mode 100644 index 73131765..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(PWD):$(PWD)/../..:$(GOPATH)/src --gogofaster_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types:. $< - -clean: - rm -f *.pb.go - rm -f *.go \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go deleted file mode 100644 index 58f6c50d..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package introspection/pb contains the protobuf definitions and objects for -// that form the libp2p introspection protocol. -package pb diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go deleted file mode 100644 index b8c609d8..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.pb.go +++ /dev/null @@ -1,9718 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: introspection.proto - -package pb - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// The status of a connection or stream. -type Status int32 - -const ( - Status_ACTIVE Status = 0 - Status_CLOSED Status = 1 - Status_OPENING Status = 2 - Status_CLOSING Status = 3 - Status_ERROR Status = 4 -) - -var Status_name = map[int32]string{ - 0: "ACTIVE", - 1: "CLOSED", - 2: "OPENING", - 3: "CLOSING", - 4: "ERROR", -} - -var Status_value = map[string]int32{ - "ACTIVE": 0, - "CLOSED": 1, - "OPENING": 2, - "CLOSING": 3, - "ERROR": 4, -} - -func (x Status) String() string { - return proto.EnumName(Status_name, int32(x)) -} - -func (Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{0} -} - -// Our role in a connection or stream. -type Role int32 - -const ( - Role_INITIATOR Role = 0 - Role_RESPONDER Role = 1 -) - -var Role_name = map[int32]string{ - 0: "INITIATOR", - 1: "RESPONDER", -} - -var Role_value = map[string]int32{ - "INITIATOR": 0, - "RESPONDER": 1, -} - -func (x Role) String() string { - return proto.EnumName(Role_name, int32(x)) -} - -func (Role) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{1} -} - -// tells client how to sort, filter or display known content properties -type EventType_EventProperty_PropertyType int32 - -const ( - // for properties to treat as a simple primitive - EventType_EventProperty_STRING EventType_EventProperty_PropertyType = 0 - EventType_EventProperty_NUMBER EventType_EventProperty_PropertyType = 1 - // for properties with special human-readable formatting - EventType_EventProperty_TIME EventType_EventProperty_PropertyType = 10 - EventType_EventProperty_PEERID EventType_EventProperty_PropertyType = 11 - EventType_EventProperty_MULTIADDR EventType_EventProperty_PropertyType = 12 - // for complex structures like nested arrays, object trees etc - 
EventType_EventProperty_JSON EventType_EventProperty_PropertyType = 90 -) - -var EventType_EventProperty_PropertyType_name = map[int32]string{ - 0: "STRING", - 1: "NUMBER", - 10: "TIME", - 11: "PEERID", - 12: "MULTIADDR", - 90: "JSON", -} - -var EventType_EventProperty_PropertyType_value = map[string]int32{ - "STRING": 0, - "NUMBER": 1, - "TIME": 10, - "PEERID": 11, - "MULTIADDR": 12, - "JSON": 90, -} - -func (x EventType_EventProperty_PropertyType) String() string { - return proto.EnumName(EventType_EventProperty_PropertyType_name, int32(x)) -} - -func (EventType_EventProperty_PropertyType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{4, 0, 0} -} - -// The DHT's relationship with this peer -type DHT_PeerInDHT_Status int32 - -const ( - // Connected, in a bucket, ready to send/receive queries - DHT_PeerInDHT_ACTIVE DHT_PeerInDHT_Status = 0 - // Not currently connected, still "in" a bucket (e.g. temporarily disconnected) - DHT_PeerInDHT_MISSING DHT_PeerInDHT_Status = 1 - // Removed from a bucket or candidate list (e.g. 
connection lost or too slow) - DHT_PeerInDHT_REJECTED DHT_PeerInDHT_Status = 2 - // Was reachable when last checked, waiting to join a currently-full bucket - DHT_PeerInDHT_CANDIDATE DHT_PeerInDHT_Status = 3 -) - -var DHT_PeerInDHT_Status_name = map[int32]string{ - 0: "ACTIVE", - 1: "MISSING", - 2: "REJECTED", - 3: "CANDIDATE", -} - -var DHT_PeerInDHT_Status_value = map[string]int32{ - "ACTIVE": 0, - "MISSING": 1, - "REJECTED": 2, - "CANDIDATE": 3, -} - -func (x DHT_PeerInDHT_Status) String() string { - return proto.EnumName(DHT_PeerInDHT_Status_name, int32(x)) -} - -func (DHT_PeerInDHT_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{11, 1, 0} -} - -type ClientCommand_Source int32 - -const ( - ClientCommand_STATE ClientCommand_Source = 0 - ClientCommand_RUNTIME ClientCommand_Source = 1 - ClientCommand_EVENTS ClientCommand_Source = 2 -) - -var ClientCommand_Source_name = map[int32]string{ - 0: "STATE", - 1: "RUNTIME", - 2: "EVENTS", -} - -var ClientCommand_Source_value = map[string]int32{ - "STATE": 0, - "RUNTIME": 1, - "EVENTS": 2, -} - -func (x ClientCommand_Source) String() string { - return proto.EnumName(ClientCommand_Source_name, int32(x)) -} - -func (ClientCommand_Source) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{17, 0} -} - -type ClientCommand_Command int32 - -const ( - // HELLO is the first command that a client must send to greet the server. - // Connections that do not respect this invariant will be terminated. - ClientCommand_HELLO ClientCommand_Command = 0 - // REQUEST is applicable to STATE and RUNTIME sources. - ClientCommand_REQUEST ClientCommand_Command = 1 - // PUSH streams can only be started for STATE and EVENTS sources. 
- ClientCommand_PUSH_ENABLE ClientCommand_Command = 2 - ClientCommand_PUSH_DISABLE ClientCommand_Command = 3 - ClientCommand_PUSH_PAUSE ClientCommand_Command = 4 - ClientCommand_PUSH_RESUME ClientCommand_Command = 5 - // UPDATE_CONFIG requests a configuration update. The config field is - // compulsory. - // - // The server reserves the right to override the requested values, and - // will return the effective configuration in the response. - ClientCommand_UPDATE_CONFIG ClientCommand_Command = 7 -) - -var ClientCommand_Command_name = map[int32]string{ - 0: "HELLO", - 1: "REQUEST", - 2: "PUSH_ENABLE", - 3: "PUSH_DISABLE", - 4: "PUSH_PAUSE", - 5: "PUSH_RESUME", - 7: "UPDATE_CONFIG", -} - -var ClientCommand_Command_value = map[string]int32{ - "HELLO": 0, - "REQUEST": 1, - "PUSH_ENABLE": 2, - "PUSH_DISABLE": 3, - "PUSH_PAUSE": 4, - "PUSH_RESUME": 5, - "UPDATE_CONFIG": 7, -} - -func (x ClientCommand_Command) String() string { - return proto.EnumName(ClientCommand_Command_name, int32(x)) -} - -func (ClientCommand_Command) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{17, 1} -} - -type CommandResponse_Result int32 - -const ( - CommandResponse_OK CommandResponse_Result = 0 - CommandResponse_ERR CommandResponse_Result = 1 -) - -var CommandResponse_Result_name = map[int32]string{ - 0: "OK", - 1: "ERR", -} - -var CommandResponse_Result_value = map[string]int32{ - "OK": 0, - "ERR": 1, -} - -func (x CommandResponse_Result) String() string { - return proto.EnumName(CommandResponse_Result_name, int32(x)) -} - -func (CommandResponse_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{18, 0} -} - -type ServerNotice_Kind int32 - -const ( - ServerNotice_DISCARDING_EVENTS ServerNotice_Kind = 0 -) - -var ServerNotice_Kind_name = map[int32]string{ - 0: "DISCARDING_EVENTS", -} - -var ServerNotice_Kind_value = map[string]int32{ - "DISCARDING_EVENTS": 0, -} - -func (x ServerNotice_Kind) String() string { - return 
proto.EnumName(ServerNotice_Kind_name, int32(x)) -} - -func (ServerNotice_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{19, 0} -} - -// Version of schema -type Version struct { - Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{0} -} -func (m *Version) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(m, src) -} -func (m *Version) XXX_Size() int { - return m.Size() -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetVersion() uint32 { - if m != nil { - return m.Version - } - return 0 -} - -// ResultCounter is a monotonically increasing counter that reports an ok/err breakdown of the total. 
-type ResultCounter struct { - Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Ok uint32 `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"` - Err uint32 `protobuf:"varint,3,opt,name=err,proto3" json:"err,omitempty"` -} - -func (m *ResultCounter) Reset() { *m = ResultCounter{} } -func (m *ResultCounter) String() string { return proto.CompactTextString(m) } -func (*ResultCounter) ProtoMessage() {} -func (*ResultCounter) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{1} -} -func (m *ResultCounter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResultCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResultCounter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResultCounter) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResultCounter.Merge(m, src) -} -func (m *ResultCounter) XXX_Size() int { - return m.Size() -} -func (m *ResultCounter) XXX_DiscardUnknown() { - xxx_messageInfo_ResultCounter.DiscardUnknown(m) -} - -var xxx_messageInfo_ResultCounter proto.InternalMessageInfo - -func (m *ResultCounter) GetTotal() uint32 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *ResultCounter) GetOk() uint32 { - if m != nil { - return m.Ok - } - return 0 -} - -func (m *ResultCounter) GetErr() uint32 { - if m != nil { - return m.Err - } - return 0 -} - -// Moving totals over sliding time windows. Models sensible time windows, -// we don't have to populate them all at once. 
-// -// Graphical example: -// -// time past -> present an event 16 min ago -// ======================================================X================>> -// -// | | 1m -// | |---| 5m -// | |-------------| 15m -// |------------X---------------| 30m -// |------------------------------------------X---------------| 60m -type SlidingCounter struct { - Over_1M uint32 `protobuf:"varint,1,opt,name=over_1m,json=over1m,proto3" json:"over_1m,omitempty"` - Over_5M uint32 `protobuf:"varint,2,opt,name=over_5m,json=over5m,proto3" json:"over_5m,omitempty"` - Over_15M uint32 `protobuf:"varint,3,opt,name=over_15m,json=over15m,proto3" json:"over_15m,omitempty"` - Over_30M uint32 `protobuf:"varint,4,opt,name=over_30m,json=over30m,proto3" json:"over_30m,omitempty"` - Over_1Hr uint32 `protobuf:"varint,5,opt,name=over_1hr,json=over1hr,proto3" json:"over_1hr,omitempty"` - Over_2Hr uint32 `protobuf:"varint,6,opt,name=over_2hr,json=over2hr,proto3" json:"over_2hr,omitempty"` - Over_4Hr uint32 `protobuf:"varint,7,opt,name=over_4hr,json=over4hr,proto3" json:"over_4hr,omitempty"` - Over_8Hr uint32 `protobuf:"varint,8,opt,name=over_8hr,json=over8hr,proto3" json:"over_8hr,omitempty"` - Over_12Hr uint32 `protobuf:"varint,9,opt,name=over_12hr,json=over12hr,proto3" json:"over_12hr,omitempty"` - Over_24Hr uint32 `protobuf:"varint,10,opt,name=over_24hr,json=over24hr,proto3" json:"over_24hr,omitempty"` -} - -func (m *SlidingCounter) Reset() { *m = SlidingCounter{} } -func (m *SlidingCounter) String() string { return proto.CompactTextString(m) } -func (*SlidingCounter) ProtoMessage() {} -func (*SlidingCounter) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{2} -} -func (m *SlidingCounter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SlidingCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SlidingCounter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SlidingCounter) XXX_Merge(src proto.Message) { - xxx_messageInfo_SlidingCounter.Merge(m, src) -} -func (m *SlidingCounter) XXX_Size() int { - return m.Size() -} -func (m *SlidingCounter) XXX_DiscardUnknown() { - xxx_messageInfo_SlidingCounter.DiscardUnknown(m) -} - -var xxx_messageInfo_SlidingCounter proto.InternalMessageInfo - -func (m *SlidingCounter) GetOver_1M() uint32 { - if m != nil { - return m.Over_1M - } - return 0 -} - -func (m *SlidingCounter) GetOver_5M() uint32 { - if m != nil { - return m.Over_5M - } - return 0 -} - -func (m *SlidingCounter) GetOver_15M() uint32 { - if m != nil { - return m.Over_15M - } - return 0 -} - -func (m *SlidingCounter) GetOver_30M() uint32 { - if m != nil { - return m.Over_30M - } - return 0 -} - -func (m *SlidingCounter) GetOver_1Hr() uint32 { - if m != nil { - return m.Over_1Hr - } - return 0 -} - -func (m *SlidingCounter) GetOver_2Hr() uint32 { - if m != nil { - return m.Over_2Hr - } - return 0 -} - -func (m *SlidingCounter) GetOver_4Hr() uint32 { - if m != nil { - return m.Over_4Hr - } - return 0 -} - -func (m *SlidingCounter) GetOver_8Hr() uint32 { - if m != nil { - return m.Over_8Hr - } - return 0 -} - -func (m *SlidingCounter) GetOver_12Hr() uint32 { - if m != nil { - return m.Over_12Hr - } - return 0 -} - -func (m *SlidingCounter) GetOver_24Hr() uint32 { - if m != nil { - return m.Over_24Hr - } - return 0 -} - -// DataGauge reports stats for data traffic in a given direction. -type DataGauge struct { - // Cumulative bytes. - CumBytes uint64 `protobuf:"varint,1,opt,name=cum_bytes,json=cumBytes,proto3" json:"cum_bytes,omitempty"` - // Cumulative packets. - CumPackets uint64 `protobuf:"varint,2,opt,name=cum_packets,json=cumPackets,proto3" json:"cum_packets,omitempty"` - // Instantaneous bandwidth measurement (bytes/second). 
- InstBw uint64 `protobuf:"varint,3,opt,name=inst_bw,json=instBw,proto3" json:"inst_bw,omitempty"` -} - -func (m *DataGauge) Reset() { *m = DataGauge{} } -func (m *DataGauge) String() string { return proto.CompactTextString(m) } -func (*DataGauge) ProtoMessage() {} -func (*DataGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{3} -} -func (m *DataGauge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DataGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DataGauge.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DataGauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_DataGauge.Merge(m, src) -} -func (m *DataGauge) XXX_Size() int { - return m.Size() -} -func (m *DataGauge) XXX_DiscardUnknown() { - xxx_messageInfo_DataGauge.DiscardUnknown(m) -} - -var xxx_messageInfo_DataGauge proto.InternalMessageInfo - -func (m *DataGauge) GetCumBytes() uint64 { - if m != nil { - return m.CumBytes - } - return 0 -} - -func (m *DataGauge) GetCumPackets() uint64 { - if m != nil { - return m.CumPackets - } - return 0 -} - -func (m *DataGauge) GetInstBw() uint64 { - if m != nil { - return m.InstBw - } - return 0 -} - -// describes a type of event -type EventType struct { - // name of event type, e.g. 
PeerConnecting - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // for runtime, send property_types for all events already seen in events list - // for events, only send property_types in the first event of a type not in runtime - PropertyTypes []*EventType_EventProperty `protobuf:"bytes,2,rep,name=property_types,json=propertyTypes,proto3" json:"property_types,omitempty"` -} - -func (m *EventType) Reset() { *m = EventType{} } -func (m *EventType) String() string { return proto.CompactTextString(m) } -func (*EventType) ProtoMessage() {} -func (*EventType) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{4} -} -func (m *EventType) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EventType.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EventType) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventType.Merge(m, src) -} -func (m *EventType) XXX_Size() int { - return m.Size() -} -func (m *EventType) XXX_DiscardUnknown() { - xxx_messageInfo_EventType.DiscardUnknown(m) -} - -var xxx_messageInfo_EventType proto.InternalMessageInfo - -func (m *EventType) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *EventType) GetPropertyTypes() []*EventType_EventProperty { - if m != nil { - return m.PropertyTypes - } - return nil -} - -// metadata about content types in event's top-level content JSON -type EventType_EventProperty struct { - // property name of content e.g. 
openTs - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // type to interpret content value as - Type EventType_EventProperty_PropertyType `protobuf:"varint,2,opt,name=type,proto3,enum=pb.EventType_EventProperty_PropertyType" json:"type,omitempty"` - // if true, expect an array of values of `type`; else, singular - HasMultiple bool `protobuf:"varint,3,opt,name=has_multiple,json=hasMultiple,proto3" json:"has_multiple,omitempty"` -} - -func (m *EventType_EventProperty) Reset() { *m = EventType_EventProperty{} } -func (m *EventType_EventProperty) String() string { return proto.CompactTextString(m) } -func (*EventType_EventProperty) ProtoMessage() {} -func (*EventType_EventProperty) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{4, 0} -} -func (m *EventType_EventProperty) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventType_EventProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EventType_EventProperty.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EventType_EventProperty) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventType_EventProperty.Merge(m, src) -} -func (m *EventType_EventProperty) XXX_Size() int { - return m.Size() -} -func (m *EventType_EventProperty) XXX_DiscardUnknown() { - xxx_messageInfo_EventType_EventProperty.DiscardUnknown(m) -} - -var xxx_messageInfo_EventType_EventProperty proto.InternalMessageInfo - -func (m *EventType_EventProperty) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *EventType_EventProperty) GetType() EventType_EventProperty_PropertyType { - if m != nil { - return m.Type - } - return EventType_EventProperty_STRING -} - -func (m *EventType_EventProperty) GetHasMultiple() bool { - if m != nil { - return m.HasMultiple - } - 
return false -} - -// Runtime encapsulates runtime info about a node. -type Runtime struct { - // e.g. go-libp2p, js-libp2p, rust-libp2p, etc. - Implementation string `protobuf:"bytes,1,opt,name=implementation,proto3" json:"implementation,omitempty"` - // e.g. 1.2.3. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // e.g. Windows, Unix, macOS, Chrome, Mozilla, etc. - Platform string `protobuf:"bytes,3,opt,name=platform,proto3" json:"platform,omitempty"` - // our peer id - the peer id of the host system - PeerId string `protobuf:"bytes,4,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // metadata describing configured event types - EventTypes []*EventType `protobuf:"bytes,7,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"` -} - -func (m *Runtime) Reset() { *m = Runtime{} } -func (m *Runtime) String() string { return proto.CompactTextString(m) } -func (*Runtime) ProtoMessage() {} -func (*Runtime) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{5} -} -func (m *Runtime) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Runtime.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Runtime) XXX_Merge(src proto.Message) { - xxx_messageInfo_Runtime.Merge(m, src) -} -func (m *Runtime) XXX_Size() int { - return m.Size() -} -func (m *Runtime) XXX_DiscardUnknown() { - xxx_messageInfo_Runtime.DiscardUnknown(m) -} - -var xxx_messageInfo_Runtime proto.InternalMessageInfo - -func (m *Runtime) GetImplementation() string { - if m != nil { - return m.Implementation - } - return "" -} - -func (m *Runtime) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Runtime) GetPlatform() string { - 
if m != nil { - return m.Platform - } - return "" -} - -func (m *Runtime) GetPeerId() string { - if m != nil { - return m.PeerId - } - return "" -} - -func (m *Runtime) GetEventTypes() []*EventType { - if m != nil { - return m.EventTypes - } - return nil -} - -// EndpointPair is a pair of multiaddrs. -type EndpointPair struct { - // the source multiaddr. - SrcMultiaddr string `protobuf:"bytes,1,opt,name=src_multiaddr,json=srcMultiaddr,proto3" json:"src_multiaddr,omitempty"` - // the destination multiaddr. - DstMultiaddr string `protobuf:"bytes,2,opt,name=dst_multiaddr,json=dstMultiaddr,proto3" json:"dst_multiaddr,omitempty"` -} - -func (m *EndpointPair) Reset() { *m = EndpointPair{} } -func (m *EndpointPair) String() string { return proto.CompactTextString(m) } -func (*EndpointPair) ProtoMessage() {} -func (*EndpointPair) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{6} -} -func (m *EndpointPair) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EndpointPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_EndpointPair.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *EndpointPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_EndpointPair.Merge(m, src) -} -func (m *EndpointPair) XXX_Size() int { - return m.Size() -} -func (m *EndpointPair) XXX_DiscardUnknown() { - xxx_messageInfo_EndpointPair.DiscardUnknown(m) -} - -var xxx_messageInfo_EndpointPair proto.InternalMessageInfo - -func (m *EndpointPair) GetSrcMultiaddr() string { - if m != nil { - return m.SrcMultiaddr - } - return "" -} - -func (m *EndpointPair) GetDstMultiaddr() string { - if m != nil { - return m.DstMultiaddr - } - return "" -} - -// Traffic encloses data transfer statistics. -type Traffic struct { - // snapshot of the data in metrics. 
- TrafficIn *DataGauge `protobuf:"bytes,1,opt,name=traffic_in,json=trafficIn,proto3" json:"traffic_in,omitempty"` - // snapshot of the data out metrics. - TrafficOut *DataGauge `protobuf:"bytes,2,opt,name=traffic_out,json=trafficOut,proto3" json:"traffic_out,omitempty"` -} - -func (m *Traffic) Reset() { *m = Traffic{} } -func (m *Traffic) String() string { return proto.CompactTextString(m) } -func (*Traffic) ProtoMessage() {} -func (*Traffic) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{7} -} -func (m *Traffic) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Traffic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Traffic.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Traffic) XXX_Merge(src proto.Message) { - xxx_messageInfo_Traffic.Merge(m, src) -} -func (m *Traffic) XXX_Size() int { - return m.Size() -} -func (m *Traffic) XXX_DiscardUnknown() { - xxx_messageInfo_Traffic.DiscardUnknown(m) -} - -var xxx_messageInfo_Traffic proto.InternalMessageInfo - -func (m *Traffic) GetTrafficIn() *DataGauge { - if m != nil { - return m.TrafficIn - } - return nil -} - -func (m *Traffic) GetTrafficOut() *DataGauge { - if m != nil { - return m.TrafficOut - } - return nil -} - -// a list of streams, by reference or inlined. -type StreamList struct { - // NOTE: only one of the next 2 fields can appear, but proto3 - // doesn't support combining oneof and repeated. - // - // streams within this connection by reference. - StreamIds [][]byte `protobuf:"bytes,1,rep,name=stream_ids,json=streamIds,proto3" json:"stream_ids,omitempty"` - // streams within this connection by inlining. 
- Streams []*Stream `protobuf:"bytes,2,rep,name=streams,proto3" json:"streams,omitempty"` -} - -func (m *StreamList) Reset() { *m = StreamList{} } -func (m *StreamList) String() string { return proto.CompactTextString(m) } -func (*StreamList) ProtoMessage() {} -func (*StreamList) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{8} -} -func (m *StreamList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StreamList.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StreamList) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamList.Merge(m, src) -} -func (m *StreamList) XXX_Size() int { - return m.Size() -} -func (m *StreamList) XXX_DiscardUnknown() { - xxx_messageInfo_StreamList.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamList proto.InternalMessageInfo - -func (m *StreamList) GetStreamIds() [][]byte { - if m != nil { - return m.StreamIds - } - return nil -} - -func (m *StreamList) GetStreams() []*Stream { - if m != nil { - return m.Streams - } - return nil -} - -// Connection reports metrics and state of a libp2p connection. -type Connection struct { - // the id of this connection, not to be shown in user tooling, - // used for (cross)referencing connections (e.g. relay). - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // the peer id of the other party. - PeerId string `protobuf:"bytes,2,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // the status of this connection. - Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=pb.Status" json:"status,omitempty"` - // a reference to the transport managing this connection. 
- TransportId []byte `protobuf:"bytes,4,opt,name=transport_id,json=transportId,proto3" json:"transport_id,omitempty"` - // the endpoints participating in this connection. - Endpoints *EndpointPair `protobuf:"bytes,5,opt,name=endpoints,proto3" json:"endpoints,omitempty"` - // the timeline of the connection, see Connection.Timeline. - Timeline *Connection_Timeline `protobuf:"bytes,6,opt,name=timeline,proto3" json:"timeline,omitempty"` - // our role in this connection. - Role Role `protobuf:"varint,7,opt,name=role,proto3,enum=pb.Role" json:"role,omitempty"` - // traffic statistics. - Traffic *Traffic `protobuf:"bytes,8,opt,name=traffic,proto3" json:"traffic,omitempty"` - // properties of this connection. - Attribs *Connection_Attributes `protobuf:"bytes,9,opt,name=attribs,proto3" json:"attribs,omitempty"` - // the instantaneous latency of this connection in nanoseconds. - LatencyNs uint64 `protobuf:"varint,10,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"` - // streams within this connection. - Streams *StreamList `protobuf:"bytes,11,opt,name=streams,proto3" json:"streams,omitempty"` - // if this is a relayed connection, this points to the relaying connection. - // a default value here (empty bytes) indicates this is not a relayed connection. - // - // Types that are valid to be assigned to RelayedOver: - // *Connection_ConnId - // *Connection_Conn - RelayedOver isConnection_RelayedOver `protobuf_oneof:"relayed_over"` - // user provided tags. 
- UserProvidedTags []string `protobuf:"bytes,99,rep,name=user_provided_tags,json=userProvidedTags,proto3" json:"user_provided_tags,omitempty"` -} - -func (m *Connection) Reset() { *m = Connection{} } -func (m *Connection) String() string { return proto.CompactTextString(m) } -func (*Connection) ProtoMessage() {} -func (*Connection) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{9} -} -func (m *Connection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Connection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Connection.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Connection) XXX_Merge(src proto.Message) { - xxx_messageInfo_Connection.Merge(m, src) -} -func (m *Connection) XXX_Size() int { - return m.Size() -} -func (m *Connection) XXX_DiscardUnknown() { - xxx_messageInfo_Connection.DiscardUnknown(m) -} - -var xxx_messageInfo_Connection proto.InternalMessageInfo - -type isConnection_RelayedOver interface { - isConnection_RelayedOver() - MarshalTo([]byte) (int, error) - Size() int -} - -type Connection_ConnId struct { - ConnId []byte `protobuf:"bytes,16,opt,name=conn_id,json=connId,proto3,oneof" json:"conn_id,omitempty"` -} -type Connection_Conn struct { - Conn *Connection `protobuf:"bytes,17,opt,name=conn,proto3,oneof" json:"conn,omitempty"` -} - -func (*Connection_ConnId) isConnection_RelayedOver() {} -func (*Connection_Conn) isConnection_RelayedOver() {} - -func (m *Connection) GetRelayedOver() isConnection_RelayedOver { - if m != nil { - return m.RelayedOver - } - return nil -} - -func (m *Connection) GetId() []byte { - if m != nil { - return m.Id - } - return nil -} - -func (m *Connection) GetPeerId() string { - if m != nil { - return m.PeerId - } - return "" -} - -func (m *Connection) GetStatus() Status { - if m != nil { - 
return m.Status - } - return Status_ACTIVE -} - -func (m *Connection) GetTransportId() []byte { - if m != nil { - return m.TransportId - } - return nil -} - -func (m *Connection) GetEndpoints() *EndpointPair { - if m != nil { - return m.Endpoints - } - return nil -} - -func (m *Connection) GetTimeline() *Connection_Timeline { - if m != nil { - return m.Timeline - } - return nil -} - -func (m *Connection) GetRole() Role { - if m != nil { - return m.Role - } - return Role_INITIATOR -} - -func (m *Connection) GetTraffic() *Traffic { - if m != nil { - return m.Traffic - } - return nil -} - -func (m *Connection) GetAttribs() *Connection_Attributes { - if m != nil { - return m.Attribs - } - return nil -} - -func (m *Connection) GetLatencyNs() uint64 { - if m != nil { - return m.LatencyNs - } - return 0 -} - -func (m *Connection) GetStreams() *StreamList { - if m != nil { - return m.Streams - } - return nil -} - -func (m *Connection) GetConnId() []byte { - if x, ok := m.GetRelayedOver().(*Connection_ConnId); ok { - return x.ConnId - } - return nil -} - -func (m *Connection) GetConn() *Connection { - if x, ok := m.GetRelayedOver().(*Connection_Conn); ok { - return x.Conn - } - return nil -} - -func (m *Connection) GetUserProvidedTags() []string { - if m != nil { - return m.UserProvidedTags - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Connection) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Connection_ConnId)(nil), - (*Connection_Conn)(nil), - } -} - -// Timeline contains the timestamps (ms since epoch) of the well-known milestones of a connection. -type Connection_Timeline struct { - // the instant when a connection was opened on the wire. - OpenTs uint64 `protobuf:"varint,1,opt,name=open_ts,json=openTs,proto3" json:"open_ts,omitempty"` - // the instant when the upgrade process (handshake, security, multiplexing) finished. 
- UpgradedTs uint64 `protobuf:"varint,2,opt,name=upgraded_ts,json=upgradedTs,proto3" json:"upgraded_ts,omitempty"` - // the instant when this connection was terminated. - CloseTs uint64 `protobuf:"varint,3,opt,name=close_ts,json=closeTs,proto3" json:"close_ts,omitempty"` -} - -func (m *Connection_Timeline) Reset() { *m = Connection_Timeline{} } -func (m *Connection_Timeline) String() string { return proto.CompactTextString(m) } -func (*Connection_Timeline) ProtoMessage() {} -func (*Connection_Timeline) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{9, 0} -} -func (m *Connection_Timeline) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Connection_Timeline) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Connection_Timeline.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Connection_Timeline) XXX_Merge(src proto.Message) { - xxx_messageInfo_Connection_Timeline.Merge(m, src) -} -func (m *Connection_Timeline) XXX_Size() int { - return m.Size() -} -func (m *Connection_Timeline) XXX_DiscardUnknown() { - xxx_messageInfo_Connection_Timeline.DiscardUnknown(m) -} - -var xxx_messageInfo_Connection_Timeline proto.InternalMessageInfo - -func (m *Connection_Timeline) GetOpenTs() uint64 { - if m != nil { - return m.OpenTs - } - return 0 -} - -func (m *Connection_Timeline) GetUpgradedTs() uint64 { - if m != nil { - return m.UpgradedTs - } - return 0 -} - -func (m *Connection_Timeline) GetCloseTs() uint64 { - if m != nil { - return m.CloseTs - } - return 0 -} - -// Attributes encapsulates the attributes of this connection. -type Connection_Attributes struct { - // the multiplexer being used. - Multiplexer string `protobuf:"bytes,1,opt,name=multiplexer,proto3" json:"multiplexer,omitempty"` - // the encryption method being used. 
- Encryption string `protobuf:"bytes,2,opt,name=encryption,proto3" json:"encryption,omitempty"` -} - -func (m *Connection_Attributes) Reset() { *m = Connection_Attributes{} } -func (m *Connection_Attributes) String() string { return proto.CompactTextString(m) } -func (*Connection_Attributes) ProtoMessage() {} -func (*Connection_Attributes) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{9, 1} -} -func (m *Connection_Attributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Connection_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Connection_Attributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Connection_Attributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Connection_Attributes.Merge(m, src) -} -func (m *Connection_Attributes) XXX_Size() int { - return m.Size() -} -func (m *Connection_Attributes) XXX_DiscardUnknown() { - xxx_messageInfo_Connection_Attributes.DiscardUnknown(m) -} - -var xxx_messageInfo_Connection_Attributes proto.InternalMessageInfo - -func (m *Connection_Attributes) GetMultiplexer() string { - if m != nil { - return m.Multiplexer - } - return "" -} - -func (m *Connection_Attributes) GetEncryption() string { - if m != nil { - return m.Encryption - } - return "" -} - -// Stream reports metrics and state of a libp2p stream. -type Stream struct { - // the id of this stream, not to be shown in user tooling, - // used for (cross)referencing streams. - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // the protocol pinned to this stream. - Protocol string `protobuf:"bytes,2,opt,name=protocol,proto3" json:"protocol,omitempty"` - // our role in this stream. - Role Role `protobuf:"varint,3,opt,name=role,proto3,enum=pb.Role" json:"role,omitempty"` - // traffic statistics. 
- Traffic *Traffic `protobuf:"bytes,4,opt,name=traffic,proto3" json:"traffic,omitempty"` - // the connection this stream is hosted under. - Conn *Stream_ConnectionRef `protobuf:"bytes,5,opt,name=conn,proto3" json:"conn,omitempty"` - // the timeline of the stream, see Stream.Timeline. - Timeline *Stream_Timeline `protobuf:"bytes,6,opt,name=timeline,proto3" json:"timeline,omitempty"` - // the status of this stream. - Status Status `protobuf:"varint,7,opt,name=status,proto3,enum=pb.Status" json:"status,omitempty"` - // the instantaneous latency of this stream in nanoseconds. - // TODO: this is hard to calculate. - LatencyNs uint64 `protobuf:"varint,16,opt,name=latency_ns,json=latencyNs,proto3" json:"latency_ns,omitempty"` - // user provided tags. - UserProvidedTags []string `protobuf:"bytes,99,rep,name=user_provided_tags,json=userProvidedTags,proto3" json:"user_provided_tags,omitempty"` -} - -func (m *Stream) Reset() { *m = Stream{} } -func (m *Stream) String() string { return proto.CompactTextString(m) } -func (*Stream) ProtoMessage() {} -func (*Stream) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{10} -} -func (m *Stream) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stream.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stream) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stream.Merge(m, src) -} -func (m *Stream) XXX_Size() int { - return m.Size() -} -func (m *Stream) XXX_DiscardUnknown() { - xxx_messageInfo_Stream.DiscardUnknown(m) -} - -var xxx_messageInfo_Stream proto.InternalMessageInfo - -func (m *Stream) GetId() []byte { - if m != nil { - return m.Id - } - return nil -} - -func (m *Stream) GetProtocol() string { - if m != nil { - return m.Protocol - } - return "" -} 
- -func (m *Stream) GetRole() Role { - if m != nil { - return m.Role - } - return Role_INITIATOR -} - -func (m *Stream) GetTraffic() *Traffic { - if m != nil { - return m.Traffic - } - return nil -} - -func (m *Stream) GetConn() *Stream_ConnectionRef { - if m != nil { - return m.Conn - } - return nil -} - -func (m *Stream) GetTimeline() *Stream_Timeline { - if m != nil { - return m.Timeline - } - return nil -} - -func (m *Stream) GetStatus() Status { - if m != nil { - return m.Status - } - return Status_ACTIVE -} - -func (m *Stream) GetLatencyNs() uint64 { - if m != nil { - return m.LatencyNs - } - return 0 -} - -func (m *Stream) GetUserProvidedTags() []string { - if m != nil { - return m.UserProvidedTags - } - return nil -} - -type Stream_ConnectionRef struct { - // Types that are valid to be assigned to Connection: - // *Stream_ConnectionRef_Conn - // *Stream_ConnectionRef_ConnId - Connection isStream_ConnectionRef_Connection `protobuf_oneof:"connection"` -} - -func (m *Stream_ConnectionRef) Reset() { *m = Stream_ConnectionRef{} } -func (m *Stream_ConnectionRef) String() string { return proto.CompactTextString(m) } -func (*Stream_ConnectionRef) ProtoMessage() {} -func (*Stream_ConnectionRef) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{10, 0} -} -func (m *Stream_ConnectionRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stream_ConnectionRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stream_ConnectionRef.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stream_ConnectionRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stream_ConnectionRef.Merge(m, src) -} -func (m *Stream_ConnectionRef) XXX_Size() int { - return m.Size() -} -func (m *Stream_ConnectionRef) XXX_DiscardUnknown() { - 
xxx_messageInfo_Stream_ConnectionRef.DiscardUnknown(m) -} - -var xxx_messageInfo_Stream_ConnectionRef proto.InternalMessageInfo - -type isStream_ConnectionRef_Connection interface { - isStream_ConnectionRef_Connection() - MarshalTo([]byte) (int, error) - Size() int -} - -type Stream_ConnectionRef_Conn struct { - Conn *Connection `protobuf:"bytes,1,opt,name=conn,proto3,oneof" json:"conn,omitempty"` -} -type Stream_ConnectionRef_ConnId struct { - ConnId []byte `protobuf:"bytes,2,opt,name=conn_id,json=connId,proto3,oneof" json:"conn_id,omitempty"` -} - -func (*Stream_ConnectionRef_Conn) isStream_ConnectionRef_Connection() {} -func (*Stream_ConnectionRef_ConnId) isStream_ConnectionRef_Connection() {} - -func (m *Stream_ConnectionRef) GetConnection() isStream_ConnectionRef_Connection { - if m != nil { - return m.Connection - } - return nil -} - -func (m *Stream_ConnectionRef) GetConn() *Connection { - if x, ok := m.GetConnection().(*Stream_ConnectionRef_Conn); ok { - return x.Conn - } - return nil -} - -func (m *Stream_ConnectionRef) GetConnId() []byte { - if x, ok := m.GetConnection().(*Stream_ConnectionRef_ConnId); ok { - return x.ConnId - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Stream_ConnectionRef) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Stream_ConnectionRef_Conn)(nil), - (*Stream_ConnectionRef_ConnId)(nil), - } -} - -// Timeline contains the timestamps (ms since epoch) of the well-known milestones of a stream. -type Stream_Timeline struct { - // the instant when the stream was opened. - OpenTs uint64 `protobuf:"varint,1,opt,name=open_ts,json=openTs,proto3" json:"open_ts,omitempty"` - // the instant when the stream was terminated. 
- CloseTs uint64 `protobuf:"varint,2,opt,name=close_ts,json=closeTs,proto3" json:"close_ts,omitempty"` -} - -func (m *Stream_Timeline) Reset() { *m = Stream_Timeline{} } -func (m *Stream_Timeline) String() string { return proto.CompactTextString(m) } -func (*Stream_Timeline) ProtoMessage() {} -func (*Stream_Timeline) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{10, 1} -} -func (m *Stream_Timeline) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stream_Timeline) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stream_Timeline.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stream_Timeline) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stream_Timeline.Merge(m, src) -} -func (m *Stream_Timeline) XXX_Size() int { - return m.Size() -} -func (m *Stream_Timeline) XXX_DiscardUnknown() { - xxx_messageInfo_Stream_Timeline.DiscardUnknown(m) -} - -var xxx_messageInfo_Stream_Timeline proto.InternalMessageInfo - -func (m *Stream_Timeline) GetOpenTs() uint64 { - if m != nil { - return m.OpenTs - } - return 0 -} - -func (m *Stream_Timeline) GetCloseTs() uint64 { - if m != nil { - return m.CloseTs - } - return 0 -} - -// DHT metrics and state. -type DHT struct { - // DHT protocol name - Protocol string `protobuf:"bytes,1,opt,name=protocol,proto3" json:"protocol,omitempty"` - // protocol enabled. - Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"` - // timestamp (ms since epoch) of start up. - StartTs uint64 `protobuf:"varint,3,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` - // params of the dht. 
- Params *DHT_Params `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"` - // existing, intantiated buckets and their contents - Buckets []*DHT_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` - // counts inbound queries received from other peers - IncomingQueries *DHT_QueryGauge `protobuf:"bytes,6,opt,name=incoming_queries,json=incomingQueries,proto3" json:"incoming_queries,omitempty"` - // counts outbound queries dispatched by this peer - OutgoingQueries *DHT_QueryGauge `protobuf:"bytes,7,opt,name=outgoing_queries,json=outgoingQueries,proto3" json:"outgoing_queries,omitempty"` -} - -func (m *DHT) Reset() { *m = DHT{} } -func (m *DHT) String() string { return proto.CompactTextString(m) } -func (*DHT) ProtoMessage() {} -func (*DHT) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{11} -} -func (m *DHT) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DHT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DHT.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DHT) XXX_Merge(src proto.Message) { - xxx_messageInfo_DHT.Merge(m, src) -} -func (m *DHT) XXX_Size() int { - return m.Size() -} -func (m *DHT) XXX_DiscardUnknown() { - xxx_messageInfo_DHT.DiscardUnknown(m) -} - -var xxx_messageInfo_DHT proto.InternalMessageInfo - -func (m *DHT) GetProtocol() string { - if m != nil { - return m.Protocol - } - return "" -} - -func (m *DHT) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *DHT) GetStartTs() uint64 { - if m != nil { - return m.StartTs - } - return 0 -} - -func (m *DHT) GetParams() *DHT_Params { - if m != nil { - return m.Params - } - return nil -} - -func (m *DHT) GetBuckets() []*DHT_Bucket { - if m != nil { - return m.Buckets - } - return nil -} - -func (m 
*DHT) GetIncomingQueries() *DHT_QueryGauge { - if m != nil { - return m.IncomingQueries - } - return nil -} - -func (m *DHT) GetOutgoingQueries() *DHT_QueryGauge { - if m != nil { - return m.OutgoingQueries - } - return nil -} - -type DHT_Params struct { - // routing table bucket size. - K uint64 `protobuf:"varint,1,opt,name=k,proto3" json:"k,omitempty"` - // concurrency of asynchronous requests. - Alpha uint64 `protobuf:"varint,2,opt,name=alpha,proto3" json:"alpha,omitempty"` - // number of disjoint paths to use. - DisjointPaths uint64 `protobuf:"varint,3,opt,name=disjoint_paths,json=disjointPaths,proto3" json:"disjoint_paths,omitempty"` - // number of peers closest to a target that must have responded - // in order for a given query path to complete - Beta uint64 `protobuf:"varint,4,opt,name=beta,proto3" json:"beta,omitempty"` -} - -func (m *DHT_Params) Reset() { *m = DHT_Params{} } -func (m *DHT_Params) String() string { return proto.CompactTextString(m) } -func (*DHT_Params) ProtoMessage() {} -func (*DHT_Params) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{11, 0} -} -func (m *DHT_Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DHT_Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DHT_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DHT_Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_DHT_Params.Merge(m, src) -} -func (m *DHT_Params) XXX_Size() int { - return m.Size() -} -func (m *DHT_Params) XXX_DiscardUnknown() { - xxx_messageInfo_DHT_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_DHT_Params proto.InternalMessageInfo - -func (m *DHT_Params) GetK() uint64 { - if m != nil { - return m.K - } - return 0 -} - -func (m *DHT_Params) GetAlpha() uint64 { - if m != nil { - return m.Alpha - } - return 0 -} 
- -func (m *DHT_Params) GetDisjointPaths() uint64 { - if m != nil { - return m.DisjointPaths - } - return 0 -} - -func (m *DHT_Params) GetBeta() uint64 { - if m != nil { - return m.Beta - } - return 0 -} - -// Peer in DHT -type DHT_PeerInDHT struct { - // the peer id of the host system - PeerId string `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` - // the peer's status when data snapshot is taken - Status DHT_PeerInDHT_Status `protobuf:"varint,2,opt,name=status,proto3,enum=pb.DHT_PeerInDHT_Status" json:"status,omitempty"` - // age in bucket (ms) - AgeInBucket uint32 `protobuf:"varint,3,opt,name=age_in_bucket,json=ageInBucket,proto3" json:"age_in_bucket,omitempty"` -} - -func (m *DHT_PeerInDHT) Reset() { *m = DHT_PeerInDHT{} } -func (m *DHT_PeerInDHT) String() string { return proto.CompactTextString(m) } -func (*DHT_PeerInDHT) ProtoMessage() {} -func (*DHT_PeerInDHT) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{11, 1} -} -func (m *DHT_PeerInDHT) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DHT_PeerInDHT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DHT_PeerInDHT.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DHT_PeerInDHT) XXX_Merge(src proto.Message) { - xxx_messageInfo_DHT_PeerInDHT.Merge(m, src) -} -func (m *DHT_PeerInDHT) XXX_Size() int { - return m.Size() -} -func (m *DHT_PeerInDHT) XXX_DiscardUnknown() { - xxx_messageInfo_DHT_PeerInDHT.DiscardUnknown(m) -} - -var xxx_messageInfo_DHT_PeerInDHT proto.InternalMessageInfo - -func (m *DHT_PeerInDHT) GetPeerId() string { - if m != nil { - return m.PeerId - } - return "" -} - -func (m *DHT_PeerInDHT) GetStatus() DHT_PeerInDHT_Status { - if m != nil { - return m.Status - } - return DHT_PeerInDHT_ACTIVE -} - -func (m *DHT_PeerInDHT) 
GetAgeInBucket() uint32 { - if m != nil { - return m.AgeInBucket - } - return 0 -} - -// A "k-bucket" containing peers of a certain kadamelia distance -type DHT_Bucket struct { - // CPL (Common Prefix Length) is the length of the common prefix - // between the ids of every peer in this bucket and the DHT peer id - Cpl uint32 `protobuf:"varint,1,opt,name=cpl,proto3" json:"cpl,omitempty"` - // Peers associated with this bucket - Peers []*DHT_PeerInDHT `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"` -} - -func (m *DHT_Bucket) Reset() { *m = DHT_Bucket{} } -func (m *DHT_Bucket) String() string { return proto.CompactTextString(m) } -func (*DHT_Bucket) ProtoMessage() {} -func (*DHT_Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{11, 2} -} -func (m *DHT_Bucket) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DHT_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DHT_Bucket.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DHT_Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_DHT_Bucket.Merge(m, src) -} -func (m *DHT_Bucket) XXX_Size() int { - return m.Size() -} -func (m *DHT_Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_DHT_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_DHT_Bucket proto.InternalMessageInfo - -func (m *DHT_Bucket) GetCpl() uint32 { - if m != nil { - return m.Cpl - } - return 0 -} - -func (m *DHT_Bucket) GetPeers() []*DHT_PeerInDHT { - if m != nil { - return m.Peers - } - return nil -} - -// Counters of query events, by status -type DHT_QueryGauge struct { - // Cumulative counter of queries with "SUCCESS" status - Success uint64 `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` - // Cumulative counter of queries with "ERROR" status - Error uint64 
`protobuf:"varint,2,opt,name=error,proto3" json:"error,omitempty"` - // Cumulative counter of queries with "TIMEOUT" status - Timeout uint64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"` -} - -func (m *DHT_QueryGauge) Reset() { *m = DHT_QueryGauge{} } -func (m *DHT_QueryGauge) String() string { return proto.CompactTextString(m) } -func (*DHT_QueryGauge) ProtoMessage() {} -func (*DHT_QueryGauge) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{11, 3} -} -func (m *DHT_QueryGauge) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DHT_QueryGauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DHT_QueryGauge.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DHT_QueryGauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_DHT_QueryGauge.Merge(m, src) -} -func (m *DHT_QueryGauge) XXX_Size() int { - return m.Size() -} -func (m *DHT_QueryGauge) XXX_DiscardUnknown() { - xxx_messageInfo_DHT_QueryGauge.DiscardUnknown(m) -} - -var xxx_messageInfo_DHT_QueryGauge proto.InternalMessageInfo - -func (m *DHT_QueryGauge) GetSuccess() uint64 { - if m != nil { - return m.Success - } - return 0 -} - -func (m *DHT_QueryGauge) GetError() uint64 { - if m != nil { - return m.Error - } - return 0 -} - -func (m *DHT_QueryGauge) GetTimeout() uint64 { - if m != nil { - return m.Timeout - } - return 0 -} - -// Subsystems encapsulates all instrumented subsystems for a libp2p host. -type Subsystems struct { - // connections data, source agnostic but currently only supports the Swarm subsystem - Connections []*Connection `protobuf:"bytes,1,rep,name=connections,proto3" json:"connections,omitempty"` - // the DHT subsystem. 
- Dht *DHT `protobuf:"bytes,2,opt,name=dht,proto3" json:"dht,omitempty"` -} - -func (m *Subsystems) Reset() { *m = Subsystems{} } -func (m *Subsystems) String() string { return proto.CompactTextString(m) } -func (*Subsystems) ProtoMessage() {} -func (*Subsystems) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{12} -} -func (m *Subsystems) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subsystems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Subsystems.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Subsystems) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subsystems.Merge(m, src) -} -func (m *Subsystems) XXX_Size() int { - return m.Size() -} -func (m *Subsystems) XXX_DiscardUnknown() { - xxx_messageInfo_Subsystems.DiscardUnknown(m) -} - -var xxx_messageInfo_Subsystems proto.InternalMessageInfo - -func (m *Subsystems) GetConnections() []*Connection { - if m != nil { - return m.Connections - } - return nil -} - -func (m *Subsystems) GetDht() *DHT { - if m != nil { - return m.Dht - } - return nil -} - -// Connections and streams output for a time interval is one of these. 
-type State struct { - // list of connections - Subsystems *Subsystems `protobuf:"bytes,1,opt,name=subsystems,proto3" json:"subsystems,omitempty"` - // overall traffic for this peer - Traffic *Traffic `protobuf:"bytes,2,opt,name=traffic,proto3" json:"traffic,omitempty"` - // moment this data snapshot and instantaneous values were taken - InstantTs uint64 `protobuf:"varint,3,opt,name=instant_ts,json=instantTs,proto3" json:"instant_ts,omitempty"` - // start of included data collection (cumulative values counted from here) - StartTs uint64 `protobuf:"varint,4,opt,name=start_ts,json=startTs,proto3" json:"start_ts,omitempty"` - // length of time up to instant_ts covered by this data snapshot - SnapshotDurationMs uint32 `protobuf:"varint,5,opt,name=snapshot_duration_ms,json=snapshotDurationMs,proto3" json:"snapshot_duration_ms,omitempty"` -} - -func (m *State) Reset() { *m = State{} } -func (m *State) String() string { return proto.CompactTextString(m) } -func (*State) ProtoMessage() {} -func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{13} -} -func (m *State) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_State.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *State) XXX_Merge(src proto.Message) { - xxx_messageInfo_State.Merge(m, src) -} -func (m *State) XXX_Size() int { - return m.Size() -} -func (m *State) XXX_DiscardUnknown() { - xxx_messageInfo_State.DiscardUnknown(m) -} - -var xxx_messageInfo_State proto.InternalMessageInfo - -func (m *State) GetSubsystems() *Subsystems { - if m != nil { - return m.Subsystems - } - return nil -} - -func (m *State) GetTraffic() *Traffic { - if m != nil { - return m.Traffic - } - return nil -} - -func (m *State) GetInstantTs() uint64 { - 
if m != nil { - return m.InstantTs - } - return 0 -} - -func (m *State) GetStartTs() uint64 { - if m != nil { - return m.StartTs - } - return 0 -} - -func (m *State) GetSnapshotDurationMs() uint32 { - if m != nil { - return m.SnapshotDurationMs - } - return 0 -} - -// Event -type Event struct { - // definition of event type, containing only `name` unless this is first encounter of novel event - Type *EventType `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // time this event occurred (ms since epoch) - Ts uint64 `protobuf:"varint,2,opt,name=ts,proto3" json:"ts,omitempty"` - // stringified json; top-level keys and value types match EventProperty definitions - Content string `protobuf:"bytes,3,opt,name=content,proto3" json:"content,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{14} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *Event) GetType() *EventType { - if m != nil { - return m.Type - } - return nil -} - -func (m *Event) GetTs() uint64 { - if m != nil { - return m.Ts - } - return 0 -} - -func (m *Event) GetContent() string { - if m != nil { - return m.Content - } - return "" -} - -// ServerMessage wraps messages to be sent to 
clients to allow extension -// based on new types of data sources -type ServerMessage struct { - // Version of this protobuf. - Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // The payload this message contains. - // - // Types that are valid to be assigned to Payload: - // *ServerMessage_State - // *ServerMessage_Runtime - // *ServerMessage_Event - // *ServerMessage_Response - // *ServerMessage_Notice - Payload isServerMessage_Payload `protobuf_oneof:"payload"` -} - -func (m *ServerMessage) Reset() { *m = ServerMessage{} } -func (m *ServerMessage) String() string { return proto.CompactTextString(m) } -func (*ServerMessage) ProtoMessage() {} -func (*ServerMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{15} -} -func (m *ServerMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServerMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServerMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerMessage.Merge(m, src) -} -func (m *ServerMessage) XXX_Size() int { - return m.Size() -} -func (m *ServerMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ServerMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ServerMessage proto.InternalMessageInfo - -type isServerMessage_Payload interface { - isServerMessage_Payload() - MarshalTo([]byte) (int, error) - Size() int -} - -type ServerMessage_State struct { - State *State `protobuf:"bytes,2,opt,name=state,proto3,oneof" json:"state,omitempty"` -} -type ServerMessage_Runtime struct { - Runtime *Runtime `protobuf:"bytes,3,opt,name=runtime,proto3,oneof" json:"runtime,omitempty"` -} -type ServerMessage_Event struct { - Event *Event 
`protobuf:"bytes,4,opt,name=event,proto3,oneof" json:"event,omitempty"` -} -type ServerMessage_Response struct { - Response *CommandResponse `protobuf:"bytes,5,opt,name=response,proto3,oneof" json:"response,omitempty"` -} -type ServerMessage_Notice struct { - Notice *ServerNotice `protobuf:"bytes,6,opt,name=notice,proto3,oneof" json:"notice,omitempty"` -} - -func (*ServerMessage_State) isServerMessage_Payload() {} -func (*ServerMessage_Runtime) isServerMessage_Payload() {} -func (*ServerMessage_Event) isServerMessage_Payload() {} -func (*ServerMessage_Response) isServerMessage_Payload() {} -func (*ServerMessage_Notice) isServerMessage_Payload() {} - -func (m *ServerMessage) GetPayload() isServerMessage_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *ServerMessage) GetVersion() *Version { - if m != nil { - return m.Version - } - return nil -} - -func (m *ServerMessage) GetState() *State { - if x, ok := m.GetPayload().(*ServerMessage_State); ok { - return x.State - } - return nil -} - -func (m *ServerMessage) GetRuntime() *Runtime { - if x, ok := m.GetPayload().(*ServerMessage_Runtime); ok { - return x.Runtime - } - return nil -} - -func (m *ServerMessage) GetEvent() *Event { - if x, ok := m.GetPayload().(*ServerMessage_Event); ok { - return x.Event - } - return nil -} - -func (m *ServerMessage) GetResponse() *CommandResponse { - if x, ok := m.GetPayload().(*ServerMessage_Response); ok { - return x.Response - } - return nil -} - -func (m *ServerMessage) GetNotice() *ServerNotice { - if x, ok := m.GetPayload().(*ServerMessage_Notice); ok { - return x.Notice - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*ServerMessage) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ServerMessage_State)(nil), - (*ServerMessage_Runtime)(nil), - (*ServerMessage_Event)(nil), - (*ServerMessage_Response)(nil), - (*ServerMessage_Notice)(nil), - } -} - -// Configuration encapsulates configuration fields for the protocol and commands. -type Configuration struct { - RetentionPeriodMs uint64 `protobuf:"varint,1,opt,name=retention_period_ms,json=retentionPeriodMs,proto3" json:"retention_period_ms,omitempty"` - StateSnapshotIntervalMs uint64 `protobuf:"varint,2,opt,name=state_snapshot_interval_ms,json=stateSnapshotIntervalMs,proto3" json:"state_snapshot_interval_ms,omitempty"` -} - -func (m *Configuration) Reset() { *m = Configuration{} } -func (m *Configuration) String() string { return proto.CompactTextString(m) } -func (*Configuration) ProtoMessage() {} -func (*Configuration) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{16} -} -func (m *Configuration) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Configuration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Configuration.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Configuration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Configuration.Merge(m, src) -} -func (m *Configuration) XXX_Size() int { - return m.Size() -} -func (m *Configuration) XXX_DiscardUnknown() { - xxx_messageInfo_Configuration.DiscardUnknown(m) -} - -var xxx_messageInfo_Configuration proto.InternalMessageInfo - -func (m *Configuration) GetRetentionPeriodMs() uint64 { - if m != nil { - return m.RetentionPeriodMs - } - return 0 -} - -func (m *Configuration) GetStateSnapshotIntervalMs() uint64 { - if m != nil { - return m.StateSnapshotIntervalMs - } - return 0 -} - -// ClientCommand is a command sent from the 
client to the server. -type ClientCommand struct { - Version *Version `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - Command ClientCommand_Command `protobuf:"varint,3,opt,name=command,proto3,enum=pb.ClientCommand_Command" json:"command,omitempty"` - Source ClientCommand_Source `protobuf:"varint,4,opt,name=source,proto3,enum=pb.ClientCommand_Source" json:"source,omitempty"` - Config *Configuration `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` -} - -func (m *ClientCommand) Reset() { *m = ClientCommand{} } -func (m *ClientCommand) String() string { return proto.CompactTextString(m) } -func (*ClientCommand) ProtoMessage() {} -func (*ClientCommand) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{17} -} -func (m *ClientCommand) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientCommand) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ClientCommand.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ClientCommand) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientCommand.Merge(m, src) -} -func (m *ClientCommand) XXX_Size() int { - return m.Size() -} -func (m *ClientCommand) XXX_DiscardUnknown() { - xxx_messageInfo_ClientCommand.DiscardUnknown(m) -} - -var xxx_messageInfo_ClientCommand proto.InternalMessageInfo - -func (m *ClientCommand) GetVersion() *Version { - if m != nil { - return m.Version - } - return nil -} - -func (m *ClientCommand) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *ClientCommand) GetCommand() ClientCommand_Command { - if m != nil { - return m.Command - } - return ClientCommand_HELLO -} - -func (m *ClientCommand) GetSource() ClientCommand_Source { - if m != nil { - 
return m.Source - } - return ClientCommand_STATE -} - -func (m *ClientCommand) GetConfig() *Configuration { - if m != nil { - return m.Config - } - return nil -} - -// CommandResponse is a response to a command sent by the client. -type CommandResponse struct { - Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Result CommandResponse_Result `protobuf:"varint,2,opt,name=result,proto3,enum=pb.CommandResponse_Result" json:"result,omitempty"` - Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` - // effective_config is the effective configuration the server holds for - // this connection. It is returned in response to HELLO and UPDATE_CONFIG - // commands. - EffectiveConfig *Configuration `protobuf:"bytes,4,opt,name=effective_config,json=effectiveConfig,proto3" json:"effective_config,omitempty"` -} - -func (m *CommandResponse) Reset() { *m = CommandResponse{} } -func (m *CommandResponse) String() string { return proto.CompactTextString(m) } -func (*CommandResponse) ProtoMessage() {} -func (*CommandResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{18} -} -func (m *CommandResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommandResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommandResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CommandResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommandResponse.Merge(m, src) -} -func (m *CommandResponse) XXX_Size() int { - return m.Size() -} -func (m *CommandResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CommandResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CommandResponse proto.InternalMessageInfo - -func (m *CommandResponse) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func 
(m *CommandResponse) GetResult() CommandResponse_Result { - if m != nil { - return m.Result - } - return CommandResponse_OK -} - -func (m *CommandResponse) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *CommandResponse) GetEffectiveConfig() *Configuration { - if m != nil { - return m.EffectiveConfig - } - return nil -} - -// ServerNotice represents a NOTICE sent from the server to the client. -type ServerNotice struct { - Kind ServerNotice_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=pb.ServerNotice_Kind" json:"kind,omitempty"` -} - -func (m *ServerNotice) Reset() { *m = ServerNotice{} } -func (m *ServerNotice) String() string { return proto.CompactTextString(m) } -func (*ServerNotice) ProtoMessage() {} -func (*ServerNotice) Descriptor() ([]byte, []int) { - return fileDescriptor_53a8bedf9a75e10a, []int{19} -} -func (m *ServerNotice) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServerNotice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServerNotice.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServerNotice) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerNotice.Merge(m, src) -} -func (m *ServerNotice) XXX_Size() int { - return m.Size() -} -func (m *ServerNotice) XXX_DiscardUnknown() { - xxx_messageInfo_ServerNotice.DiscardUnknown(m) -} - -var xxx_messageInfo_ServerNotice proto.InternalMessageInfo - -func (m *ServerNotice) GetKind() ServerNotice_Kind { - if m != nil { - return m.Kind - } - return ServerNotice_DISCARDING_EVENTS -} - -func init() { - proto.RegisterEnum("pb.Status", Status_name, Status_value) - proto.RegisterEnum("pb.Role", Role_name, Role_value) - proto.RegisterEnum("pb.EventType_EventProperty_PropertyType", EventType_EventProperty_PropertyType_name, EventType_EventProperty_PropertyType_value) - 
proto.RegisterEnum("pb.DHT_PeerInDHT_Status", DHT_PeerInDHT_Status_name, DHT_PeerInDHT_Status_value) - proto.RegisterEnum("pb.ClientCommand_Source", ClientCommand_Source_name, ClientCommand_Source_value) - proto.RegisterEnum("pb.ClientCommand_Command", ClientCommand_Command_name, ClientCommand_Command_value) - proto.RegisterEnum("pb.CommandResponse_Result", CommandResponse_Result_name, CommandResponse_Result_value) - proto.RegisterEnum("pb.ServerNotice_Kind", ServerNotice_Kind_name, ServerNotice_Kind_value) - proto.RegisterType((*Version)(nil), "pb.Version") - proto.RegisterType((*ResultCounter)(nil), "pb.ResultCounter") - proto.RegisterType((*SlidingCounter)(nil), "pb.SlidingCounter") - proto.RegisterType((*DataGauge)(nil), "pb.DataGauge") - proto.RegisterType((*EventType)(nil), "pb.EventType") - proto.RegisterType((*EventType_EventProperty)(nil), "pb.EventType.EventProperty") - proto.RegisterType((*Runtime)(nil), "pb.Runtime") - proto.RegisterType((*EndpointPair)(nil), "pb.EndpointPair") - proto.RegisterType((*Traffic)(nil), "pb.Traffic") - proto.RegisterType((*StreamList)(nil), "pb.StreamList") - proto.RegisterType((*Connection)(nil), "pb.Connection") - proto.RegisterType((*Connection_Timeline)(nil), "pb.Connection.Timeline") - proto.RegisterType((*Connection_Attributes)(nil), "pb.Connection.Attributes") - proto.RegisterType((*Stream)(nil), "pb.Stream") - proto.RegisterType((*Stream_ConnectionRef)(nil), "pb.Stream.ConnectionRef") - proto.RegisterType((*Stream_Timeline)(nil), "pb.Stream.Timeline") - proto.RegisterType((*DHT)(nil), "pb.DHT") - proto.RegisterType((*DHT_Params)(nil), "pb.DHT.Params") - proto.RegisterType((*DHT_PeerInDHT)(nil), "pb.DHT.PeerInDHT") - proto.RegisterType((*DHT_Bucket)(nil), "pb.DHT.Bucket") - proto.RegisterType((*DHT_QueryGauge)(nil), "pb.DHT.QueryGauge") - proto.RegisterType((*Subsystems)(nil), "pb.Subsystems") - proto.RegisterType((*State)(nil), "pb.State") - proto.RegisterType((*Event)(nil), "pb.Event") - 
proto.RegisterType((*ServerMessage)(nil), "pb.ServerMessage") - proto.RegisterType((*Configuration)(nil), "pb.Configuration") - proto.RegisterType((*ClientCommand)(nil), "pb.ClientCommand") - proto.RegisterType((*CommandResponse)(nil), "pb.CommandResponse") - proto.RegisterType((*ServerNotice)(nil), "pb.ServerNotice") -} - -func init() { proto.RegisterFile("introspection.proto", fileDescriptor_53a8bedf9a75e10a) } - -var fileDescriptor_53a8bedf9a75e10a = []byte{ - // 2207 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0x5b, 0x6f, 0x1b, 0xc7, - 0x15, 0xe6, 0xfd, 0x72, 0x78, 0xf1, 0x6a, 0x9c, 0x20, 0xb4, 0x52, 0xab, 0xf6, 0xc6, 0x49, 0x14, - 0xc3, 0x50, 0x6d, 0x3a, 0x06, 0x02, 0x34, 0x0d, 0x20, 0x89, 0x5b, 0x8b, 0x8e, 0x44, 0xd1, 0x43, - 0xca, 0x68, 0xfb, 0xd0, 0xc5, 0x8a, 0x3b, 0x22, 0xb7, 0x22, 0x77, 0xb7, 0x33, 0x43, 0x25, 0x02, - 0xfa, 0xd0, 0x7f, 0xd0, 0xfe, 0x82, 0xfe, 0x86, 0x3e, 0xf7, 0xad, 0x40, 0x1f, 0x8a, 0x3e, 0xe5, - 0xb1, 0x68, 0x5f, 0x0a, 0xfb, 0x29, 0xff, 0xa2, 0x38, 0x33, 0xb3, 0x17, 0xc9, 0x97, 0xa6, 0x6f, - 0x7b, 0xce, 0xf7, 0x9d, 0xb3, 0xb3, 0xe7, 0x36, 0x87, 0x84, 0x9b, 0x41, 0x28, 0x79, 0x24, 0x62, - 0x36, 0x93, 0x41, 0x14, 0xee, 0xc4, 0x3c, 0x92, 0x11, 0x29, 0xc5, 0xa7, 0xf6, 0x47, 0x50, 0x7f, - 0xc1, 0xb8, 0x08, 0xa2, 0x90, 0xf4, 0xa0, 0x7e, 0xa1, 0x1f, 0x7b, 0xc5, 0x3b, 0xc5, 0xed, 0x0e, - 0x4d, 0x44, 0xfb, 0x29, 0x74, 0x28, 0x13, 0xeb, 0xa5, 0xdc, 0x8f, 0xd6, 0xa1, 0x64, 0x9c, 0xbc, - 0x07, 0x55, 0x19, 0x49, 0x6f, 0x69, 0x88, 0x5a, 0x20, 0x5d, 0x28, 0x45, 0xe7, 0xbd, 0x92, 0x52, - 0x95, 0xa2, 0x73, 0x62, 0x41, 0x99, 0x71, 0xde, 0x2b, 0x2b, 0x05, 0x3e, 0xda, 0x7f, 0x2a, 0x41, - 0x77, 0xb2, 0x0c, 0xfc, 0x20, 0x9c, 0x27, 0xae, 0x3e, 0x80, 0x7a, 0x74, 0xc1, 0xb8, 0xfb, 0x68, - 0x65, 0x9c, 0xd5, 0x50, 0x7c, 0xb4, 0x4a, 0x81, 0x27, 0x2b, 0xe3, 0x52, 0x01, 0x4f, 0x56, 0xe4, - 0x16, 0x34, 0xb4, 0xc5, 0x93, 0x95, 0xf1, 0xad, 0x88, 0x8f, 0x72, 0xd0, 0xe3, 0x87, 0xab, 0x5e, - 0x25, 0x83, 
0x1e, 0x3f, 0xcc, 0x59, 0x2d, 0x78, 0xaf, 0x9a, 0xb3, 0x5a, 0xf0, 0x14, 0xea, 0x2f, - 0x78, 0xaf, 0x96, 0x41, 0xfd, 0x1c, 0xf4, 0xf9, 0x82, 0xf7, 0xea, 0x19, 0xf4, 0x79, 0x0e, 0xfa, - 0x62, 0xc1, 0x7b, 0x8d, 0x0c, 0xfa, 0x62, 0xc1, 0xc9, 0x87, 0xd0, 0xd4, 0xef, 0x42, 0x8f, 0x4d, - 0x85, 0x29, 0x2e, 0xca, 0x29, 0xd8, 0x47, 0x9f, 0x90, 0x81, 0x28, 0xdb, 0xa7, 0xd0, 0x1c, 0x78, - 0xd2, 0x7b, 0xea, 0xad, 0xe7, 0x0c, 0x99, 0xb3, 0xf5, 0xca, 0x3d, 0xbd, 0x94, 0x4c, 0xa8, 0xe0, - 0x54, 0x68, 0x63, 0xb6, 0x5e, 0xed, 0xa1, 0x4c, 0x7e, 0x0c, 0x2d, 0x04, 0x63, 0x6f, 0x76, 0xce, - 0xa4, 0x50, 0x21, 0xaa, 0x50, 0x98, 0xad, 0x57, 0x63, 0xad, 0xc1, 0xf8, 0x05, 0xa1, 0x90, 0xee, - 0xe9, 0x37, 0x2a, 0x4a, 0x15, 0x5a, 0x43, 0x71, 0xef, 0x1b, 0xfb, 0xaf, 0x25, 0x68, 0x3a, 0x17, - 0x2c, 0x94, 0xd3, 0xcb, 0x98, 0x11, 0x02, 0x95, 0xd0, 0x5b, 0x31, 0xe5, 0xbf, 0x49, 0xd5, 0x33, - 0xd9, 0x83, 0x6e, 0xcc, 0xa3, 0x98, 0x71, 0x79, 0xe9, 0xca, 0xcb, 0x98, 0xa1, 0xfb, 0xf2, 0x76, - 0xab, 0xff, 0xe1, 0x4e, 0x7c, 0xba, 0x93, 0x9a, 0xea, 0xa7, 0xb1, 0x21, 0xd2, 0x4e, 0x62, 0x82, - 0x98, 0xd8, 0xfc, 0x77, 0x11, 0x3a, 0x57, 0x08, 0x6f, 0x7c, 0xd3, 0x97, 0x50, 0xc1, 0x17, 0xa8, - 0xe3, 0x77, 0xfb, 0xdb, 0xef, 0xf0, 0xbf, 0x33, 0xce, 0xb9, 0xa7, 0xca, 0x8a, 0xdc, 0x85, 0xf6, - 0xc2, 0x13, 0xee, 0x6a, 0xbd, 0x94, 0x41, 0xbc, 0x64, 0xea, 0x3b, 0x1b, 0xb4, 0xb5, 0xf0, 0xc4, - 0x91, 0x51, 0xd9, 0x27, 0xd0, 0xce, 0x1b, 0x12, 0x80, 0xda, 0x64, 0x4a, 0x87, 0xa3, 0xa7, 0x56, - 0x01, 0x9f, 0x47, 0x27, 0x47, 0x7b, 0x0e, 0xb5, 0x8a, 0xa4, 0x01, 0x95, 0xe9, 0xf0, 0xc8, 0xb1, - 0x00, 0xb5, 0x63, 0xc7, 0xa1, 0xc3, 0x81, 0xd5, 0x22, 0x1d, 0x68, 0x1e, 0x9d, 0x1c, 0x4e, 0x87, - 0xbb, 0x83, 0x01, 0xb5, 0xda, 0x48, 0x7a, 0x36, 0x39, 0x1e, 0x59, 0xbf, 0xb2, 0xff, 0x5c, 0x84, - 0x3a, 0x5d, 0x87, 0x32, 0x58, 0x31, 0xf2, 0x09, 0x74, 0x83, 0x55, 0xbc, 0x64, 0x2b, 0x16, 0x4a, - 0x4f, 0x26, 0xed, 0xd3, 0xa4, 0xd7, 0xb4, 0xf9, 0xfe, 0x2a, 0x29, 0x42, 0x22, 0x92, 0x4d, 0x68, - 0xc4, 0x4b, 0x4f, 0x9e, 0x45, 0x5c, 0x57, 0x74, 
0x93, 0xa6, 0x32, 0xa6, 0x31, 0x66, 0x8c, 0xbb, - 0x81, 0xaf, 0x2a, 0xba, 0x49, 0x6b, 0x28, 0x0e, 0x7d, 0xb2, 0x03, 0x2d, 0x86, 0x01, 0x32, 0x19, - 0xaa, 0xab, 0x0c, 0x75, 0xae, 0x44, 0x90, 0x02, 0x4b, 0x1e, 0x85, 0xfd, 0x0b, 0x68, 0x3b, 0xa1, - 0x1f, 0x47, 0x41, 0x28, 0xc7, 0x5e, 0xc0, 0xc9, 0x47, 0xd0, 0x11, 0x7c, 0xa6, 0x83, 0xe7, 0xf9, - 0x3e, 0x37, 0xa7, 0x6e, 0x0b, 0x3e, 0x3b, 0x4a, 0x74, 0x48, 0xf2, 0x85, 0xcc, 0x91, 0xf4, 0xc9, - 0xdb, 0xbe, 0x90, 0x29, 0xc9, 0x9e, 0x43, 0x7d, 0xca, 0xbd, 0xb3, 0xb3, 0x60, 0x46, 0x1e, 0x00, - 0x48, 0xfd, 0xe8, 0x06, 0x3a, 0x0e, 0xe6, 0x4c, 0x69, 0x55, 0xd3, 0xa6, 0x21, 0x0c, 0x43, 0xfc, - 0x84, 0x84, 0x1d, 0xad, 0xa5, 0xf2, 0xfd, 0x1a, 0x3d, 0xf1, 0x77, 0xbc, 0x96, 0xf6, 0x73, 0x80, - 0x89, 0xe4, 0xcc, 0x5b, 0x1d, 0x06, 0x42, 0x92, 0xdb, 0x00, 0x42, 0x49, 0x6e, 0xe0, 0x63, 0x7f, - 0x94, 0xb7, 0xdb, 0xb4, 0xa9, 0x35, 0x43, 0x5f, 0x90, 0x7b, 0x50, 0xd7, 0x42, 0x52, 0xbd, 0x80, - 0x8e, 0xb5, 0x3d, 0x4d, 0x20, 0xfb, 0x5f, 0x55, 0x80, 0xfd, 0x28, 0x0c, 0xf5, 0x60, 0xc4, 0x11, - 0x16, 0xf8, 0xea, 0xdc, 0x6d, 0x5a, 0x0a, 0xfc, 0x7c, 0xf4, 0x4b, 0x57, 0xa2, 0x6f, 0x43, 0x4d, - 0x48, 0x4f, 0xae, 0x85, 0x4a, 0x58, 0x37, 0x71, 0x8e, 0x1a, 0x6a, 0x10, 0x2c, 0x4f, 0xc9, 0xbd, - 0x50, 0xc4, 0x11, 0x97, 0x49, 0xfe, 0xda, 0xb4, 0x95, 0xea, 0x54, 0x12, 0x9b, 0xcc, 0x24, 0x45, - 0xa8, 0xb1, 0xd4, 0xea, 0x5b, 0x2a, 0x85, 0xb9, 0x4c, 0xd1, 0x8c, 0x42, 0x1e, 0x43, 0x03, 0x6b, - 0x6e, 0x19, 0x84, 0x4c, 0x8d, 0xaa, 0x56, 0xff, 0x03, 0xa4, 0x67, 0x5f, 0xb0, 0x33, 0x35, 0x30, - 0x4d, 0x89, 0xe4, 0x47, 0x50, 0xe1, 0xd1, 0x92, 0xa9, 0x01, 0xd6, 0xed, 0x37, 0xd0, 0x80, 0x46, - 0x4b, 0x46, 0x95, 0x96, 0x7c, 0x0c, 0x75, 0x13, 0x62, 0x35, 0xc6, 0x5a, 0xfd, 0x16, 0x12, 0x4c, - 0x42, 0x69, 0x82, 0x91, 0xc7, 0x50, 0xf7, 0xa4, 0xe4, 0xc1, 0xa9, 0x50, 0x13, 0xad, 0xd5, 0xbf, - 0x75, 0xed, 0xc5, 0xbb, 0x0a, 0x5d, 0x4b, 0x26, 0x68, 0xc2, 0xc4, 0x14, 0x2d, 0x3d, 0xc9, 0xc2, - 0xd9, 0xa5, 0x1b, 0x0a, 0x35, 0xec, 0x2a, 0xb4, 0x69, 0x34, 0x23, 0x41, 0xb6, 0xb3, 
0x14, 0xb5, - 0x94, 0xcf, 0x6e, 0x96, 0x22, 0x4c, 0x71, 0x9a, 0x26, 0x72, 0x0b, 0xea, 0xb3, 0x28, 0x0c, 0x31, - 0x8a, 0x16, 0x46, 0xf1, 0xa0, 0x40, 0x6b, 0xa8, 0x18, 0xfa, 0xe4, 0x1e, 0x54, 0xf0, 0xa9, 0xb7, - 0x91, 0x79, 0xc8, 0x4e, 0x75, 0x50, 0xa0, 0x0a, 0x25, 0x0f, 0x80, 0xac, 0x05, 0xe3, 0x6e, 0xcc, - 0xa3, 0x8b, 0xc0, 0x67, 0xbe, 0x2b, 0xbd, 0xb9, 0xe8, 0xcd, 0xee, 0x94, 0xb7, 0x9b, 0xd4, 0x42, - 0x64, 0x6c, 0x80, 0xa9, 0x37, 0x17, 0x9b, 0x2e, 0x34, 0x92, 0x38, 0xaa, 0x7b, 0x28, 0x66, 0xa1, - 0x2b, 0x93, 0x19, 0x5c, 0x43, 0x71, 0xaa, 0x26, 0xf0, 0x3a, 0x9e, 0x73, 0x4f, 0x79, 0x4b, 0x27, - 0x70, 0xa2, 0x9a, 0xe2, 0xa1, 0x1b, 0xb3, 0x65, 0x24, 0x18, 0xa2, 0x7a, 0x04, 0xd7, 0x95, 0x3c, - 0x15, 0x9b, 0x23, 0x80, 0x2c, 0x5e, 0xe4, 0x0e, 0xb4, 0x92, 0x19, 0xf6, 0x2d, 0x4b, 0x1a, 0x31, - 0xaf, 0x22, 0x5b, 0x00, 0x2c, 0x9c, 0xf1, 0xcb, 0x58, 0x66, 0xe3, 0x23, 0xa7, 0xd9, 0xeb, 0x42, - 0x9b, 0xb3, 0xa5, 0x77, 0xc9, 0x7c, 0x17, 0xef, 0x92, 0x67, 0x95, 0x46, 0xdb, 0xb2, 0xec, 0xef, - 0xcb, 0x50, 0xd3, 0xd1, 0x7c, 0xad, 0xb0, 0x71, 0xe4, 0xe0, 0x12, 0x30, 0x8b, 0x96, 0xc6, 0x5d, - 0x2a, 0xa7, 0xf5, 0x52, 0xfe, 0x5f, 0xf5, 0x52, 0x79, 0x47, 0xbd, 0x3c, 0x30, 0x69, 0xd1, 0x45, - 0xdd, 0xcb, 0x12, 0x9b, 0xcb, 0x0e, 0x65, 0x67, 0x26, 0x3d, 0x3f, 0x79, 0xad, 0xae, 0x6f, 0xe6, - 0x2c, 0xde, 0x50, 0xd3, 0x59, 0xff, 0xd5, 0xdf, 0xda, 0x7f, 0x57, 0xab, 0xcf, 0xba, 0x5e, 0x7d, - 0xff, 0x5f, 0x49, 0xfc, 0x1a, 0x3a, 0x57, 0x0e, 0x9e, 0xd6, 0x5d, 0xf1, 0x9d, 0x75, 0x97, 0x2b, - 0xdc, 0xd2, 0xd5, 0xc2, 0xdd, 0x6b, 0x03, 0xcc, 0x52, 0x83, 0xcd, 0xaf, 0x7e, 0x48, 0xc9, 0xe5, - 0x2b, 0xaa, 0x74, 0xa5, 0xa2, 0xec, 0xef, 0xab, 0x50, 0x1e, 0x1c, 0x4c, 0xaf, 0x24, 0xb6, 0x78, - 0x2d, 0xb1, 0x3d, 0xa8, 0xb3, 0xd0, 0x3b, 0x5d, 0x32, 0x7d, 0x98, 0x06, 0x4d, 0x44, 0x74, 0x2c, - 0xa4, 0xc7, 0x65, 0xae, 0x54, 0x95, 0x3c, 0x15, 0xe4, 0x13, 0xa8, 0xc5, 0x1e, 0xc7, 0x1e, 0xad, - 0x64, 0x5f, 0x3a, 0x38, 0x98, 0xee, 0x8c, 0x95, 0x96, 0x1a, 0x14, 0x9b, 0xf9, 0x74, 0xad, 0x97, - 0x91, 0xaa, 0x9a, 0xb7, 
0x29, 0x71, 0x4f, 0xa9, 0x69, 0x02, 0x93, 0x9f, 0x81, 0x15, 0x84, 0xb3, - 0x68, 0x15, 0x84, 0x73, 0xf7, 0xb7, 0x6b, 0xc6, 0x03, 0x26, 0x4c, 0xd2, 0x49, 0x62, 0xf2, 0x7c, - 0xcd, 0xf8, 0xa5, 0xbe, 0x00, 0x6e, 0x24, 0xdc, 0xe7, 0x9a, 0x8a, 0xe6, 0xd1, 0x5a, 0xce, 0xa3, - 0xbc, 0x79, 0xfd, 0xed, 0xe6, 0x09, 0xd7, 0x98, 0x6f, 0xce, 0xa1, 0xa6, 0x4f, 0x4e, 0xda, 0x50, - 0x3c, 0x37, 0x01, 0x2e, 0x9e, 0xe3, 0x4e, 0xeb, 0x2d, 0xe3, 0x85, 0x67, 0x02, 0xab, 0x05, 0xf2, - 0x31, 0x74, 0xfd, 0x40, 0xfc, 0x06, 0xa7, 0xaf, 0x1b, 0x7b, 0x72, 0x91, 0x84, 0xa7, 0x93, 0x68, - 0xc7, 0xa8, 0xc4, 0xdd, 0xe6, 0x94, 0x49, 0x4f, 0x85, 0xa8, 0x42, 0xd5, 0xf3, 0xe6, 0x5f, 0x8a, - 0xd0, 0x1c, 0xe3, 0x6d, 0x11, 0x62, 0x5e, 0x72, 0x37, 0x49, 0xf1, 0xca, 0x4d, 0xf2, 0x30, 0xad, - 0x64, 0xbd, 0x04, 0xf5, 0xd2, 0xf8, 0x26, 0xb6, 0xd7, 0xeb, 0xda, 0x86, 0x8e, 0x37, 0x67, 0x6e, - 0x10, 0xba, 0x3a, 0xa2, 0x66, 0x0b, 0x6e, 0x79, 0x73, 0x36, 0x0c, 0x75, 0xb0, 0xed, 0xaf, 0xb0, - 0xf3, 0x15, 0x1b, 0xa0, 0xb6, 0xbb, 0x3f, 0x1d, 0xbe, 0x70, 0xac, 0x02, 0x69, 0x41, 0xfd, 0x68, - 0x38, 0x99, 0xe0, 0xfa, 0x53, 0x24, 0x6d, 0x68, 0x50, 0xe7, 0x99, 0xb3, 0x3f, 0x75, 0x06, 0x56, - 0x09, 0x57, 0x9d, 0xfd, 0xdd, 0xd1, 0x60, 0x38, 0xd8, 0x9d, 0x3a, 0x56, 0x79, 0x73, 0x1f, 0x6a, - 0xda, 0x13, 0x6e, 0xf1, 0xb3, 0x38, 0xd9, 0xf4, 0xf1, 0x91, 0x7c, 0x0a, 0x55, 0x3c, 0x7b, 0x72, - 0xaf, 0x6e, 0xbc, 0x76, 0x60, 0xaa, 0xf1, 0xcd, 0x17, 0x00, 0x59, 0x26, 0xb0, 0xfa, 0xc4, 0x7a, - 0x36, 0x63, 0x22, 0xa9, 0xea, 0x44, 0xc4, 0xd0, 0x33, 0xce, 0x23, 0x9e, 0x84, 0x5e, 0x09, 0xc8, - 0xc7, 0x76, 0xc7, 0xcd, 0xc0, 0x94, 0xa4, 0x11, 0xed, 0x5f, 0x02, 0x4c, 0xd6, 0xa7, 0xe2, 0x52, - 0x48, 0xb6, 0x12, 0xe4, 0x21, 0xb4, 0xb2, 0x3e, 0xd2, 0x8b, 0xc0, 0x6b, 0xfd, 0x48, 0xf3, 0x14, - 0x72, 0x0b, 0xca, 0xfe, 0x22, 0xd9, 0x37, 0xea, 0xe6, 0xf8, 0x14, 0x75, 0xf6, 0x3f, 0x8a, 0x50, - 0xc5, 0xc0, 0x31, 0xb2, 0x03, 0x20, 0xd2, 0x97, 0xe4, 0xbb, 0x3c, 0x7b, 0x35, 0xcd, 0x31, 0xf2, - 0x73, 0xb1, 0xf4, 0x8e, 0xb9, 0x78, 0x1b, 0x00, 0xf7, 0x70, 
0x2f, 0xcc, 0xf5, 0x5a, 0xd3, 0x68, - 0x74, 0x87, 0xa7, 0x8d, 0x58, 0xb9, 0xda, 0x88, 0x0f, 0xe1, 0x3d, 0x11, 0x7a, 0xb1, 0x58, 0x44, - 0xd2, 0xf5, 0xd7, 0x5c, 0x2d, 0x95, 0xee, 0x4a, 0x98, 0x5f, 0x33, 0x24, 0xc1, 0x06, 0x06, 0x3a, - 0x12, 0xf6, 0x14, 0xaa, 0x6a, 0x17, 0x24, 0x77, 0xcd, 0x9a, 0x9d, 0x5b, 0xc8, 0xb2, 0x25, 0x51, - 0xef, 0xd2, 0x5d, 0x28, 0xa5, 0x43, 0xa5, 0x24, 0x05, 0x46, 0x7f, 0x16, 0x85, 0x92, 0x85, 0xd2, - 0xac, 0xa4, 0x89, 0x68, 0xff, 0xa1, 0x04, 0x9d, 0x09, 0xe3, 0x17, 0x8c, 0x1f, 0x31, 0x21, 0xbc, - 0xb9, 0xba, 0x12, 0xf2, 0xbf, 0x1c, 0xcd, 0xa7, 0x9b, 0xdf, 0x95, 0xd9, 0x9a, 0x7b, 0x17, 0xaa, - 0x58, 0xc1, 0xcc, 0xc4, 0xa7, 0x99, 0x8c, 0x6c, 0x76, 0x50, 0xa0, 0x1a, 0x21, 0x9f, 0x42, 0x9d, - 0xeb, 0xb5, 0x5a, 0xbd, 0xd5, 0x78, 0x32, 0x9b, 0xf6, 0x41, 0x81, 0x26, 0x28, 0xfa, 0x52, 0xbb, - 0xad, 0x19, 0x4a, 0xcd, 0xf4, 0x93, 0xd0, 0x97, 0x42, 0xc8, 0x23, 0x68, 0x70, 0x26, 0xe2, 0x28, - 0x14, 0xcc, 0xdc, 0x42, 0x37, 0x75, 0x51, 0xac, 0x56, 0x5e, 0xe8, 0x53, 0x03, 0x1d, 0x14, 0x68, - 0x4a, 0x23, 0xf7, 0xa1, 0x16, 0x46, 0x32, 0x98, 0x25, 0x97, 0x90, 0xda, 0xc5, 0xf4, 0xb7, 0x8e, - 0x94, 0x1e, 0xc7, 0xb7, 0x66, 0xec, 0x35, 0xa1, 0x1e, 0x7b, 0x97, 0xcb, 0xc8, 0xf3, 0xed, 0xdf, - 0xa9, 0xbb, 0xe1, 0x2c, 0x98, 0x9b, 0xd0, 0x93, 0x1d, 0xb8, 0xc9, 0x19, 0x06, 0x0b, 0x53, 0x14, - 0x33, 0x1e, 0x44, 0xbe, 0xbb, 0x4a, 0xca, 0x7e, 0x23, 0x85, 0xc6, 0x0a, 0x39, 0x12, 0xe4, 0xa7, - 0xb0, 0xa9, 0xbe, 0xdf, 0x4d, 0x13, 0x1c, 0xe0, 0x8f, 0xe3, 0x0b, 0x6f, 0xe9, 0xae, 0x92, 0xa4, - 0x7c, 0xa0, 0x18, 0x13, 0x43, 0x18, 0x1a, 0xfc, 0x48, 0xd8, 0xbf, 0x2f, 0x43, 0x67, 0x7f, 0x19, - 0xb0, 0x50, 0x9a, 0x4f, 0xfb, 0xa1, 0xf9, 0xd0, 0x3b, 0x81, 0x49, 0x79, 0xe0, 0xe3, 0x8a, 0x37, - 0xd3, 0x1e, 0xcc, 0xd5, 0xaf, 0x57, 0xbc, 0xbc, 0xeb, 0x34, 0x7a, 0x09, 0x53, 0x8d, 0xaf, 0x68, - 0xcd, 0x67, 0x4c, 0x65, 0xc2, 0x8c, 0xaf, 0xab, 0x36, 0x13, 0x85, 0x53, 0xc3, 0x23, 0x9f, 0x01, - 0xde, 0x80, 0x67, 0xc1, 0xdc, 0x64, 0x65, 0xc3, 0xb4, 0x6a, 0x16, 0x3f, 0x6a, 0x08, 0xf6, 0x03, - 
0xa8, 0x69, 0x63, 0xd2, 0x84, 0xea, 0x64, 0x8a, 0xa3, 0x49, 0x0d, 0x31, 0x7a, 0x32, 0x52, 0xbf, - 0xd6, 0x8a, 0x38, 0xdd, 0x9c, 0x17, 0xce, 0x68, 0x3a, 0xb1, 0x4a, 0xf6, 0xb7, 0x50, 0x4f, 0x22, - 0xd0, 0x84, 0xea, 0x81, 0x73, 0x78, 0x78, 0x6c, 0xe8, 0xce, 0xf3, 0x13, 0x67, 0x32, 0xb5, 0x8a, - 0xe4, 0x06, 0xb4, 0xc6, 0x27, 0x93, 0x03, 0xd7, 0x19, 0xed, 0xee, 0x1d, 0x3a, 0x56, 0x89, 0x58, - 0xd0, 0x56, 0x8a, 0xc1, 0x70, 0xa2, 0x34, 0x65, 0xd2, 0x05, 0x50, 0x9a, 0xf1, 0xee, 0xc9, 0xc4, - 0xb1, 0x2a, 0xa9, 0x09, 0x75, 0x26, 0x27, 0x47, 0x8e, 0x55, 0x25, 0x1b, 0xd0, 0x39, 0x19, 0xe3, - 0x98, 0x74, 0xf7, 0x8f, 0x47, 0x3f, 0x1f, 0x3e, 0xb5, 0xea, 0xf6, 0xdf, 0x8a, 0x70, 0xe3, 0x5a, - 0x5d, 0xe5, 0x36, 0x2e, 0x1d, 0xdd, 0x3e, 0xd4, 0xb8, 0xfa, 0x13, 0xc5, 0xcc, 0xf9, 0xcd, 0x37, - 0x14, 0xe3, 0x8e, 0xfe, 0x9b, 0x85, 0x1a, 0x66, 0x36, 0x18, 0x75, 0x0b, 0x9a, 0xc1, 0xf8, 0x25, - 0x58, 0xec, 0xec, 0x0c, 0x87, 0xd9, 0x05, 0x73, 0x4d, 0x28, 0x2b, 0x6f, 0x0b, 0xe5, 0x8d, 0x94, - 0xaa, 0xf5, 0xf6, 0x2d, 0xa8, 0xe9, 0xb7, 0x90, 0x1a, 0x94, 0x8e, 0xbf, 0xb6, 0x0a, 0xa4, 0x0e, - 0x65, 0x87, 0x52, 0xab, 0x88, 0x3f, 0x11, 0xf3, 0xc5, 0x4e, 0x3e, 0x83, 0xca, 0x79, 0x10, 0xea, - 0x8f, 0xe8, 0xf6, 0xdf, 0xbf, 0xde, 0x0c, 0x3b, 0x5f, 0x07, 0xa1, 0x4f, 0x15, 0xc5, 0xbe, 0x0d, - 0x15, 0x94, 0xc8, 0xfb, 0xb0, 0x31, 0x18, 0x4e, 0xf6, 0x77, 0xe9, 0x60, 0x38, 0x7a, 0xea, 0x9a, - 0xd4, 0x14, 0xee, 0x3b, 0x6f, 0xbc, 0x8e, 0x00, 0x6a, 0xfb, 0x87, 0xc7, 0x13, 0x67, 0x60, 0x15, - 0x31, 0x4d, 0xc7, 0x63, 0x67, 0x84, 0x57, 0x53, 0x09, 0x05, 0x04, 0x50, 0x28, 0x63, 0x2e, 0x1d, - 0x4a, 0x8f, 0xa9, 0x55, 0xb9, 0x7f, 0x0f, 0x2a, 0xb8, 0x89, 0xe2, 0x65, 0x35, 0x1c, 0x0d, 0xa7, - 0xc3, 0xdd, 0xe9, 0x31, 0xb5, 0x0a, 0x28, 0x52, 0x67, 0x32, 0x3e, 0x1e, 0x0d, 0xf0, 0xb7, 0xfc, - 0x5e, 0xef, 0xef, 0x2f, 0xb7, 0x8a, 0xdf, 0xbd, 0xdc, 0x2a, 0xfe, 0xe7, 0xe5, 0x56, 0xf1, 0x8f, - 0xaf, 0xb6, 0x0a, 0xdf, 0xbd, 0xda, 0x2a, 0xfc, 0xf3, 0xd5, 0x56, 0xe1, 0xb4, 0xa6, 0x56, 0xa1, - 0xc7, 0xff, 0x0d, 0x00, 0x00, 0xff, 
0xff, 0x54, 0x4b, 0x9d, 0x7a, 0x0f, 0x13, 0x00, 0x00, -} - -func (m *Version) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Version) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Version) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Version != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Version)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ResultCounter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResultCounter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResultCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Err != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Err)) - i-- - dAtA[i] = 0x18 - } - if m.Ok != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Ok)) - i-- - dAtA[i] = 0x10 - } - if m.Total != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *SlidingCounter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SlidingCounter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SlidingCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Over_24Hr != 0 { - i = 
encodeVarintIntrospection(dAtA, i, uint64(m.Over_24Hr)) - i-- - dAtA[i] = 0x50 - } - if m.Over_12Hr != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_12Hr)) - i-- - dAtA[i] = 0x48 - } - if m.Over_8Hr != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_8Hr)) - i-- - dAtA[i] = 0x40 - } - if m.Over_4Hr != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_4Hr)) - i-- - dAtA[i] = 0x38 - } - if m.Over_2Hr != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_2Hr)) - i-- - dAtA[i] = 0x30 - } - if m.Over_1Hr != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_1Hr)) - i-- - dAtA[i] = 0x28 - } - if m.Over_30M != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_30M)) - i-- - dAtA[i] = 0x20 - } - if m.Over_15M != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_15M)) - i-- - dAtA[i] = 0x18 - } - if m.Over_5M != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_5M)) - i-- - dAtA[i] = 0x10 - } - if m.Over_1M != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Over_1M)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DataGauge) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DataGauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DataGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.InstBw != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.InstBw)) - i-- - dAtA[i] = 0x18 - } - if m.CumPackets != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.CumPackets)) - i-- - dAtA[i] = 0x10 - } - if m.CumBytes != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.CumBytes)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *EventType) Marshal() (dAtA []byte, err error) { 
- size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventType) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventType) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PropertyTypes) > 0 { - for iNdEx := len(m.PropertyTypes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PropertyTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EventType_EventProperty) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventType_EventProperty) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventType_EventProperty) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.HasMultiple { - i-- - if m.HasMultiple { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Type != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Runtime) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, 
err - } - return dAtA[:n], nil -} - -func (m *Runtime) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EventTypes) > 0 { - for iNdEx := len(m.EventTypes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EventTypes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.PeerId) > 0 { - i -= len(m.PeerId) - copy(dAtA[i:], m.PeerId) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.PeerId))) - i-- - dAtA[i] = 0x22 - } - if len(m.Platform) > 0 { - i -= len(m.Platform) - copy(dAtA[i:], m.Platform) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Platform))) - i-- - dAtA[i] = 0x1a - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - } - if len(m.Implementation) > 0 { - i -= len(m.Implementation) - copy(dAtA[i:], m.Implementation) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Implementation))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *EndpointPair) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EndpointPair) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EndpointPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.DstMultiaddr) > 0 { - i -= len(m.DstMultiaddr) - copy(dAtA[i:], m.DstMultiaddr) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.DstMultiaddr))) - i-- - dAtA[i] = 0x12 - } - if len(m.SrcMultiaddr) > 0 { - 
i -= len(m.SrcMultiaddr) - copy(dAtA[i:], m.SrcMultiaddr) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.SrcMultiaddr))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Traffic) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Traffic) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Traffic) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TrafficOut != nil { - { - size, err := m.TrafficOut.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.TrafficIn != nil { - { - size, err := m.TrafficIn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StreamList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StreamList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Streams) > 0 { - for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Streams[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.StreamIds) > 0 { - for iNdEx := len(m.StreamIds) - 1; iNdEx >= 0; iNdEx-- { - i -= 
len(m.StreamIds[iNdEx]) - copy(dAtA[i:], m.StreamIds[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.StreamIds[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Connection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Connection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Connection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.UserProvidedTags) > 0 { - for iNdEx := len(m.UserProvidedTags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UserProvidedTags[iNdEx]) - copy(dAtA[i:], m.UserProvidedTags[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UserProvidedTags[iNdEx]))) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0x9a - } - } - if m.RelayedOver != nil { - { - size := m.RelayedOver.Size() - i -= size - if _, err := m.RelayedOver.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Streams != nil { - { - size, err := m.Streams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.LatencyNs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.LatencyNs)) - i-- - dAtA[i] = 0x50 - } - if m.Attribs != nil { - { - size, err := m.Attribs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.Traffic != nil { - { - size, err := m.Traffic.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Role != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Role)) - 
i-- - dAtA[i] = 0x38 - } - if m.Timeline != nil { - { - size, err := m.Timeline.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Endpoints != nil { - { - size, err := m.Endpoints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.TransportId) > 0 { - i -= len(m.TransportId) - copy(dAtA[i:], m.TransportId) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.TransportId))) - i-- - dAtA[i] = 0x22 - } - if m.Status != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x18 - } - if len(m.PeerId) > 0 { - i -= len(m.PeerId) - copy(dAtA[i:], m.PeerId) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.PeerId))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Connection_ConnId) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Connection_ConnId) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ConnId != nil { - i -= len(m.ConnId) - copy(dAtA[i:], m.ConnId) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ConnId))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - return len(dAtA) - i, nil -} -func (m *Connection_Conn) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Connection_Conn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Conn != nil { - { - size, err := m.Conn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - 
dAtA[i] = 0x8a - } - return len(dAtA) - i, nil -} -func (m *Connection_Timeline) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Connection_Timeline) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Connection_Timeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CloseTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.CloseTs)) - i-- - dAtA[i] = 0x18 - } - if m.UpgradedTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.UpgradedTs)) - i-- - dAtA[i] = 0x10 - } - if m.OpenTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.OpenTs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Connection_Attributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Connection_Attributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Connection_Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Encryption) > 0 { - i -= len(m.Encryption) - copy(dAtA[i:], m.Encryption) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Encryption))) - i-- - dAtA[i] = 0x12 - } - if len(m.Multiplexer) > 0 { - i -= len(m.Multiplexer) - copy(dAtA[i:], m.Multiplexer) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Multiplexer))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Stream) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *Stream) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stream) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.UserProvidedTags) > 0 { - for iNdEx := len(m.UserProvidedTags) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UserProvidedTags[iNdEx]) - copy(dAtA[i:], m.UserProvidedTags[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UserProvidedTags[iNdEx]))) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0x9a - } - } - if m.LatencyNs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.LatencyNs)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.Status != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x38 - } - if m.Timeline != nil { - { - size, err := m.Timeline.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Conn != nil { - { - size, err := m.Conn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Traffic != nil { - { - size, err := m.Traffic.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Role != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Role)) - i-- - dAtA[i] = 0x18 - } - if len(m.Protocol) > 0 { - i -= len(m.Protocol) - copy(dAtA[i:], m.Protocol) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Protocol))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Stream_ConnectionRef) Marshal() (dAtA []byte, err error) 
{ - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stream_ConnectionRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stream_ConnectionRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Connection != nil { - { - size := m.Connection.Size() - i -= size - if _, err := m.Connection.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *Stream_ConnectionRef_Conn) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stream_ConnectionRef_Conn) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Conn != nil { - { - size, err := m.Conn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *Stream_ConnectionRef_ConnId) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stream_ConnectionRef_ConnId) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ConnId != nil { - i -= len(m.ConnId) - copy(dAtA[i:], m.ConnId) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ConnId))) - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *Stream_Timeline) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stream_Timeline) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stream_Timeline) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - _ = i - var l int - _ = l - if m.CloseTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.CloseTs)) - i-- - dAtA[i] = 0x10 - } - if m.OpenTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.OpenTs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DHT) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DHT) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DHT) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OutgoingQueries != nil { - { - size, err := m.OutgoingQueries.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.IncomingQueries != nil { - { - size, err := m.IncomingQueries.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.Buckets) > 0 { - for iNdEx := len(m.Buckets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Buckets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.Params != nil { - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.StartTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.StartTs)) - i-- - dAtA[i] = 0x18 - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Protocol) > 0 { - i -= len(m.Protocol) - 
copy(dAtA[i:], m.Protocol) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Protocol))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DHT_Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DHT_Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DHT_Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Beta != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Beta)) - i-- - dAtA[i] = 0x20 - } - if m.DisjointPaths != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.DisjointPaths)) - i-- - dAtA[i] = 0x18 - } - if m.Alpha != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Alpha)) - i-- - dAtA[i] = 0x10 - } - if m.K != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.K)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DHT_PeerInDHT) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DHT_PeerInDHT) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DHT_PeerInDHT) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AgeInBucket != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.AgeInBucket)) - i-- - dAtA[i] = 0x18 - } - if m.Status != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x10 - } - if len(m.PeerId) > 0 { - i -= len(m.PeerId) - copy(dAtA[i:], m.PeerId) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.PeerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DHT_Bucket) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DHT_Bucket) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DHT_Bucket) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Peers) > 0 { - for iNdEx := len(m.Peers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Peers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Cpl != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Cpl)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DHT_QueryGauge) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DHT_QueryGauge) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DHT_QueryGauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Timeout != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Timeout)) - i-- - dAtA[i] = 0x18 - } - if m.Error != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Error)) - i-- - dAtA[i] = 0x10 - } - if m.Success != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Success)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Subsystems) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subsystems) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subsystems) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Dht != nil { - { - size, err := m.Dht.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Connections) > 0 { - for iNdEx := len(m.Connections) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Connections[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *State) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *State) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SnapshotDurationMs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.SnapshotDurationMs)) - i-- - dAtA[i] = 0x28 - } - if m.StartTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.StartTs)) - i-- - dAtA[i] = 0x20 - } - if m.InstantTs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.InstantTs)) - i-- - dAtA[i] = 0x18 - } - if m.Traffic != nil { - { - size, err := m.Traffic.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Subsystems != nil { - { - size, err := m.Subsystems.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func 
(m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Content) > 0 { - i -= len(m.Content) - copy(dAtA[i:], m.Content) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Content))) - i-- - dAtA[i] = 0x1a - } - if m.Ts != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Ts)) - i-- - dAtA[i] = 0x10 - } - if m.Type != nil { - { - size, err := m.Type.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServerMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServerMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Payload != nil { - { - size := m.Payload.Size() - i -= size - if _, err := m.Payload.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Version != nil { - { - size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServerMessage_State) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ServerMessage_State) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.State != nil { - { - size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *ServerMessage_Runtime) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerMessage_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Runtime != nil { - { - size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *ServerMessage_Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerMessage_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *ServerMessage_Response) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerMessage_Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Response != nil { - { - size, err := m.Response.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *ServerMessage_Notice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerMessage_Notice) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - if m.Notice != nil { - { - size, err := m.Notice.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *Configuration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Configuration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Configuration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.StateSnapshotIntervalMs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.StateSnapshotIntervalMs)) - i-- - dAtA[i] = 0x10 - } - if m.RetentionPeriodMs != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.RetentionPeriodMs)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ClientCommand) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClientCommand) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClientCommand) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Config != nil { - { - size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Source != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Source)) - i-- - dAtA[i] = 0x20 - } - if m.Command != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Command)) - i-- - dAtA[i] = 0x18 - } - if m.Id != 0 { - i = 
encodeVarintIntrospection(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x10 - } - if m.Version != nil { - { - size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CommandResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CommandResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CommandResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EffectiveConfig != nil { - { - size, err := m.EffectiveConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x1a - } - if m.Result != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Result)) - i-- - dAtA[i] = 0x10 - } - if m.Id != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Id)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ServerNotice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServerNotice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerNotice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Kind != 0 { - i = encodeVarintIntrospection(dAtA, i, uint64(m.Kind)) - i-- 
- dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { - offset -= sovIntrospection(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Version) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != 0 { - n += 1 + sovIntrospection(uint64(m.Version)) - } - return n -} - -func (m *ResultCounter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Total != 0 { - n += 1 + sovIntrospection(uint64(m.Total)) - } - if m.Ok != 0 { - n += 1 + sovIntrospection(uint64(m.Ok)) - } - if m.Err != 0 { - n += 1 + sovIntrospection(uint64(m.Err)) - } - return n -} - -func (m *SlidingCounter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Over_1M != 0 { - n += 1 + sovIntrospection(uint64(m.Over_1M)) - } - if m.Over_5M != 0 { - n += 1 + sovIntrospection(uint64(m.Over_5M)) - } - if m.Over_15M != 0 { - n += 1 + sovIntrospection(uint64(m.Over_15M)) - } - if m.Over_30M != 0 { - n += 1 + sovIntrospection(uint64(m.Over_30M)) - } - if m.Over_1Hr != 0 { - n += 1 + sovIntrospection(uint64(m.Over_1Hr)) - } - if m.Over_2Hr != 0 { - n += 1 + sovIntrospection(uint64(m.Over_2Hr)) - } - if m.Over_4Hr != 0 { - n += 1 + sovIntrospection(uint64(m.Over_4Hr)) - } - if m.Over_8Hr != 0 { - n += 1 + sovIntrospection(uint64(m.Over_8Hr)) - } - if m.Over_12Hr != 0 { - n += 1 + sovIntrospection(uint64(m.Over_12Hr)) - } - if m.Over_24Hr != 0 { - n += 1 + sovIntrospection(uint64(m.Over_24Hr)) - } - return n -} - -func (m *DataGauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CumBytes != 0 { - n += 1 + sovIntrospection(uint64(m.CumBytes)) - } - if m.CumPackets != 0 { - n += 1 + sovIntrospection(uint64(m.CumPackets)) - } - if m.InstBw != 0 { - n += 1 + sovIntrospection(uint64(m.InstBw)) - } - return n -} - -func (m *EventType) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if len(m.PropertyTypes) > 0 { - for _, e := range m.PropertyTypes { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - return n -} - -func (m *EventType_EventProperty) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Type != 0 { - n += 1 + sovIntrospection(uint64(m.Type)) - } - if m.HasMultiple { - n += 2 - } - return n -} - -func (m *Runtime) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Implementation) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.Platform) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.PeerId) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if len(m.EventTypes) > 0 { - for _, e := range m.EventTypes { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - return n -} - -func (m *EndpointPair) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SrcMultiaddr) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.DstMultiaddr) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *Traffic) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TrafficIn != nil { - l = m.TrafficIn.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.TrafficOut != nil { - l = m.TrafficOut.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *StreamList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.StreamIds) > 0 { - for _, b := range m.StreamIds { - l = len(b) - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if len(m.Streams) > 0 { - for _, e := range 
m.Streams { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - return n -} - -func (m *Connection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.PeerId) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Status != 0 { - n += 1 + sovIntrospection(uint64(m.Status)) - } - l = len(m.TransportId) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Endpoints != nil { - l = m.Endpoints.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Timeline != nil { - l = m.Timeline.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Role != 0 { - n += 1 + sovIntrospection(uint64(m.Role)) - } - if m.Traffic != nil { - l = m.Traffic.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Attribs != nil { - l = m.Attribs.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.LatencyNs != 0 { - n += 1 + sovIntrospection(uint64(m.LatencyNs)) - } - if m.Streams != nil { - l = m.Streams.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.RelayedOver != nil { - n += m.RelayedOver.Size() - } - if len(m.UserProvidedTags) > 0 { - for _, s := range m.UserProvidedTags { - l = len(s) - n += 2 + l + sovIntrospection(uint64(l)) - } - } - return n -} - -func (m *Connection_ConnId) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConnId != nil { - l = len(m.ConnId) - n += 2 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *Connection_Conn) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Conn != nil { - l = m.Conn.Size() - n += 2 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *Connection_Timeline) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OpenTs != 0 { - n += 1 + sovIntrospection(uint64(m.OpenTs)) - } - if m.UpgradedTs != 0 { - n += 1 + sovIntrospection(uint64(m.UpgradedTs)) - } - if m.CloseTs != 0 
{ - n += 1 + sovIntrospection(uint64(m.CloseTs)) - } - return n -} - -func (m *Connection_Attributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Multiplexer) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.Encryption) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *Stream) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.Protocol) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Role != 0 { - n += 1 + sovIntrospection(uint64(m.Role)) - } - if m.Traffic != nil { - l = m.Traffic.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Conn != nil { - l = m.Conn.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Timeline != nil { - l = m.Timeline.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Status != 0 { - n += 1 + sovIntrospection(uint64(m.Status)) - } - if m.LatencyNs != 0 { - n += 2 + sovIntrospection(uint64(m.LatencyNs)) - } - if len(m.UserProvidedTags) > 0 { - for _, s := range m.UserProvidedTags { - l = len(s) - n += 2 + l + sovIntrospection(uint64(l)) - } - } - return n -} - -func (m *Stream_ConnectionRef) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Connection != nil { - n += m.Connection.Size() - } - return n -} - -func (m *Stream_ConnectionRef_Conn) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Conn != nil { - l = m.Conn.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *Stream_ConnectionRef_ConnId) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ConnId != nil { - l = len(m.ConnId) - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *Stream_Timeline) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OpenTs != 0 { - n += 1 + 
sovIntrospection(uint64(m.OpenTs)) - } - if m.CloseTs != 0 { - n += 1 + sovIntrospection(uint64(m.CloseTs)) - } - return n -} - -func (m *DHT) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Protocol) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Enabled { - n += 2 - } - if m.StartTs != 0 { - n += 1 + sovIntrospection(uint64(m.StartTs)) - } - if m.Params != nil { - l = m.Params.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if len(m.Buckets) > 0 { - for _, e := range m.Buckets { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.IncomingQueries != nil { - l = m.IncomingQueries.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.OutgoingQueries != nil { - l = m.OutgoingQueries.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *DHT_Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.K != 0 { - n += 1 + sovIntrospection(uint64(m.K)) - } - if m.Alpha != 0 { - n += 1 + sovIntrospection(uint64(m.Alpha)) - } - if m.DisjointPaths != 0 { - n += 1 + sovIntrospection(uint64(m.DisjointPaths)) - } - if m.Beta != 0 { - n += 1 + sovIntrospection(uint64(m.Beta)) - } - return n -} - -func (m *DHT_PeerInDHT) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerId) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Status != 0 { - n += 1 + sovIntrospection(uint64(m.Status)) - } - if m.AgeInBucket != 0 { - n += 1 + sovIntrospection(uint64(m.AgeInBucket)) - } - return n -} - -func (m *DHT_Bucket) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Cpl != 0 { - n += 1 + sovIntrospection(uint64(m.Cpl)) - } - if len(m.Peers) > 0 { - for _, e := range m.Peers { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - return n -} - -func (m *DHT_QueryGauge) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Success != 0 { - n += 1 + 
sovIntrospection(uint64(m.Success)) - } - if m.Error != 0 { - n += 1 + sovIntrospection(uint64(m.Error)) - } - if m.Timeout != 0 { - n += 1 + sovIntrospection(uint64(m.Timeout)) - } - return n -} - -func (m *Subsystems) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Connections) > 0 { - for _, e := range m.Connections { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.Dht != nil { - l = m.Dht.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *State) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Subsystems != nil { - l = m.Subsystems.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Traffic != nil { - l = m.Traffic.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.InstantTs != 0 { - n += 1 + sovIntrospection(uint64(m.InstantTs)) - } - if m.StartTs != 0 { - n += 1 + sovIntrospection(uint64(m.StartTs)) - } - if m.SnapshotDurationMs != 0 { - n += 1 + sovIntrospection(uint64(m.SnapshotDurationMs)) - } - return n -} - -func (m *Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != nil { - l = m.Type.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Ts != 0 { - n += 1 + sovIntrospection(uint64(m.Ts)) - } - l = len(m.Content) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *ServerMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != nil { - l = m.Version.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Payload != nil { - n += m.Payload.Size() - } - return n -} - -func (m *ServerMessage_State) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.State != nil { - l = m.State.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *ServerMessage_Runtime) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Runtime != nil { - l = 
m.Runtime.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *ServerMessage_Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *ServerMessage_Response) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Response != nil { - l = m.Response.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *ServerMessage_Notice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Notice != nil { - l = m.Notice.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} -func (m *Configuration) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RetentionPeriodMs != 0 { - n += 1 + sovIntrospection(uint64(m.RetentionPeriodMs)) - } - if m.StateSnapshotIntervalMs != 0 { - n += 1 + sovIntrospection(uint64(m.StateSnapshotIntervalMs)) - } - return n -} - -func (m *ClientCommand) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Version != nil { - l = m.Version.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.Id != 0 { - n += 1 + sovIntrospection(uint64(m.Id)) - } - if m.Command != 0 { - n += 1 + sovIntrospection(uint64(m.Command)) - } - if m.Source != 0 { - n += 1 + sovIntrospection(uint64(m.Source)) - } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *CommandResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != 0 { - n += 1 + sovIntrospection(uint64(m.Id)) - } - if m.Result != 0 { - n += 1 + sovIntrospection(uint64(m.Result)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.EffectiveConfig != nil { - l = m.EffectiveConfig.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - return n -} - -func (m *ServerNotice) Size() (n int) { - if m == nil { 
- return 0 - } - var l int - _ = l - if m.Kind != 0 { - n += 1 + sovIntrospection(uint64(m.Kind)) - } - return n -} - -func sovIntrospection(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozIntrospection(x uint64) (n int) { - return sovIntrospection(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Version) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Version: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - m.Version = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Version |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResultCounter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResultCounter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResultCounter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ok", wireType) - } - m.Ok = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Ok |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - m.Err = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Err |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*SlidingCounter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SlidingCounter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SlidingCounter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_1M", wireType) - } - m.Over_1M = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_1M |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_5M", wireType) - } - m.Over_5M = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_5M |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_15M", wireType) - } - m.Over_15M = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_15M |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_30M", wireType) - } - m.Over_30M = 0 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_30M |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_1Hr", wireType) - } - m.Over_1Hr = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_1Hr |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_2Hr", wireType) - } - m.Over_2Hr = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_2Hr |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_4Hr", wireType) - } - m.Over_4Hr = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_4Hr |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_8Hr", wireType) - } - m.Over_8Hr = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_8Hr |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_12Hr", wireType) - } - m.Over_12Hr = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_12Hr |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Over_24Hr", wireType) - } - m.Over_24Hr = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Over_24Hr |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DataGauge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DataGauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DataGauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CumBytes", wireType) - } - m.CumBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CumBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 
{ - return fmt.Errorf("proto: wrong wireType = %d for field CumPackets", wireType) - } - m.CumPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CumPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InstBw", wireType) - } - m.InstBw = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InstBw |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventType) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventType: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventType: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PropertyTypes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PropertyTypes = append(m.PropertyTypes, &EventType_EventProperty{}) - if err := m.PropertyTypes[len(m.PropertyTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventType_EventProperty) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventProperty: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventProperty: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= EventType_EventProperty_PropertyType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HasMultiple", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.HasMultiple = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if 
err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Runtime) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Runtime: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Implementation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Implementation = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Platform = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventTypes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EventTypes = append(m.EventTypes, &EventType{}) - if err := m.EventTypes[len(m.EventTypes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointPair) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointPair: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPair: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SrcMultiaddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SrcMultiaddr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DstMultiaddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DstMultiaddr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Traffic) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
Traffic: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Traffic: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TrafficIn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TrafficIn == nil { - m.TrafficIn = &DataGauge{} - } - if err := m.TrafficIn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TrafficOut", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TrafficOut == nil { - m.TrafficOut = &DataGauge{} - } - if err := m.TrafficOut.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - 
iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StreamList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StreamList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StreamList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamIds", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StreamIds = append(m.StreamIds, make([]byte, postIndex-iNdEx)) - copy(m.StreamIds[len(m.StreamIds)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Streams = append(m.Streams, &Stream{}) - if err := m.Streams[len(m.Streams)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Connection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Connection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Connection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
- if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TransportId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TransportId = append(m.TransportId[:0], dAtA[iNdEx:postIndex]...) 
- if m.TransportId == nil { - m.TransportId = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Endpoints == nil { - m.Endpoints = &EndpointPair{} - } - if err := m.Endpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeline", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Timeline == nil { - m.Timeline = &Connection_Timeline{} - } - if err := m.Timeline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - m.Role = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Role |= Role(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Traffic", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Traffic == nil { - m.Traffic = &Traffic{} - } - if err := m.Traffic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attribs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Attribs == nil { - m.Attribs = &Connection_Attributes{} - } - if err := m.Attribs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatencyNs", wireType) - } - m.LatencyNs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LatencyNs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) - } - var msglen int - for shift := uint(0); 
; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Streams == nil { - m.Streams = &StreamList{} - } - if err := m.Streams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.RelayedOver = &Connection_ConnId{v} - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Connection{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
m.RelayedOver = &Connection_Conn{v} - iNdEx = postIndex - case 99: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserProvidedTags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserProvidedTags = append(m.UserProvidedTags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Connection_Timeline) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Timeline: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Timeline: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTs", wireType) 
- } - m.OpenTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OpenTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpgradedTs", wireType) - } - m.UpgradedTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpgradedTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CloseTs", wireType) - } - m.CloseTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CloseTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Connection_Attributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Attributes: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Multiplexer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Multiplexer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Encryption", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Encryption = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Stream) Unmarshal(dAtA []byte) error { 
- l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Stream: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Stream: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
- if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - m.Role = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Role |= Role(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Traffic", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Traffic == nil { - m.Traffic = &Traffic{} - } - if err := m.Traffic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType) - } - var msglen 
int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Conn == nil { - m.Conn = &Stream_ConnectionRef{} - } - if err := m.Conn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeline", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Timeline == nil { - m.Timeline = &Stream_Timeline{} - } - if err := m.Timeline.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatencyNs", wireType) - } - m.LatencyNs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LatencyNs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 99: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserProvidedTags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserProvidedTags = append(m.UserProvidedTags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Stream_ConnectionRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConnectionRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConnectionRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Connection{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Connection = &Stream_ConnectionRef_Conn{v} - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := make([]byte, postIndex-iNdEx) - copy(v, dAtA[iNdEx:postIndex]) - m.Connection = &Stream_ConnectionRef_ConnId{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Stream_Timeline) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Timeline: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Timeline: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTs", wireType) - } - m.OpenTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OpenTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CloseTs", wireType) - } - m.CloseTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CloseTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DHT) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DHT: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DHT: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) - } - m.StartTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Params == nil { - m.Params = &DHT_Params{} - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Buckets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Buckets = append(m.Buckets, &DHT_Bucket{}) - if err := m.Buckets[len(m.Buckets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncomingQueries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IncomingQueries == nil { - m.IncomingQueries = &DHT_QueryGauge{} - } - if 
err := m.IncomingQueries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OutgoingQueries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.OutgoingQueries == nil { - m.OutgoingQueries = &DHT_QueryGauge{} - } - if err := m.OutgoingQueries.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DHT_Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field K", wireType) - } - m.K = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.K |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Alpha", wireType) - } - m.Alpha = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Alpha |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisjointPaths", wireType) - } - m.DisjointPaths = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DisjointPaths |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Beta", wireType) - } - m.Beta = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Beta |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DHT_PeerInDHT) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var 
wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PeerInDHT: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PeerInDHT: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= DHT_PeerInDHT_Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AgeInBucket", wireType) - } - m.AgeInBucket = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AgeInBucket |= uint32(b&0x7F) << shift 
- if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DHT_Bucket) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Bucket: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Bucket: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cpl", wireType) - } - m.Cpl = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Cpl |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Peers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Peers = append(m.Peers, &DHT_PeerInDHT{}) - if err := m.Peers[len(m.Peers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DHT_QueryGauge) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGauge: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGauge: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) - } - m.Success = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Success |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - m.Error = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Error |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - m.Timeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timeout |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Subsystems) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subsystems: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subsystems: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Connections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 
0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Connections = append(m.Connections, &Connection{}) - if err := m.Connections[len(m.Connections)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dht", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Dht == nil { - m.Dht = &DHT{} - } - if err := m.Dht.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *State) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: State: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
State: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsystems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Subsystems == nil { - m.Subsystems = &Subsystems{} - } - if err := m.Subsystems.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Traffic", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Traffic == nil { - m.Traffic = &Traffic{} - } - if err := m.Traffic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InstantTs", wireType) - } - m.InstantTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InstantTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field StartTs", wireType) - } - m.StartTs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StartTs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotDurationMs", wireType) - } - m.SnapshotDurationMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SnapshotDurationMs |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Type == nil { - m.Type = &EventType{} - } - if err := m.Type.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Ts", wireType) - } - m.Ts = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Ts |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Content = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Version == nil { - m.Version = &Version{} - } - if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &State{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &ServerMessage_State{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Runtime{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &ServerMessage_Runtime{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &Event{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &ServerMessage_Event{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &CommandResponse{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &ServerMessage_Response{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Notice", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ServerNotice{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Payload = &ServerMessage_Notice{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Configuration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType 
:= int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Configuration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Configuration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetentionPeriodMs", wireType) - } - m.RetentionPeriodMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RetentionPeriodMs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StateSnapshotIntervalMs", wireType) - } - m.StateSnapshotIntervalMs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.StateSnapshotIntervalMs |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientCommand) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
ClientCommand: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientCommand: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Version == nil { - m.Version = &Version{} - } - if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - m.Command = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Command |= ClientCommand_Command(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - m.Source = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
m.Source |= ClientCommand_Source(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Config == nil { - m.Config = &Configuration{} - } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CommandResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommandResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommandResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d 
for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - m.Result = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Result |= CommandResponse_Result(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EffectiveConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EffectiveConfig == nil { 
- m.EffectiveConfig = &Configuration{} - } - if err := m.EffectiveConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerNotice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerNotice: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerNotice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= ServerNotice_Kind(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func skipIntrospection(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthIntrospection - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupIntrospection - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthIntrospection - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupIntrospection = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto b/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto deleted file mode 100644 index 144e7b99..00000000 --- 
a/vendor/github.com/libp2p/go-libp2p/core/introspection/pb/introspection.proto +++ /dev/null @@ -1,421 +0,0 @@ -syntax = "proto3"; - -package pb; - -// Version of schema -message Version { - uint32 version = 1; -} -// ResultCounter is a monotonically increasing counter that reports an ok/err breakdown of the total. -message ResultCounter { - uint32 total = 1; - uint32 ok = 2; - uint32 err = 3; -} - -// Moving totals over sliding time windows. Models sensible time windows, -// we don't have to populate them all at once. -// -// Graphical example: -// -// time past -> present an event 16 min ago -// ======================================================X================>> -// | | 1m -// | |---| 5m -// | |-------------| 15m -// |------------X---------------| 30m -// |------------------------------------------X---------------| 60m -message SlidingCounter { - uint32 over_1m = 1; - uint32 over_5m = 2; - uint32 over_15m = 3; - uint32 over_30m = 4; - uint32 over_1hr = 5; - uint32 over_2hr = 6; - uint32 over_4hr = 7; - uint32 over_8hr = 8; - uint32 over_12hr = 9; - uint32 over_24hr = 10; -} - -// DataGauge reports stats for data traffic in a given direction. -message DataGauge { - // Cumulative bytes. - uint64 cum_bytes = 1; - // Cumulative packets. - uint64 cum_packets = 2; - // Instantaneous bandwidth measurement (bytes/second). - uint64 inst_bw = 3; -} - -// describes a type of event -message EventType { - // metadata about content types in event's top-level content JSON - message EventProperty { - // tells client how to sort, filter or display known content properties - enum PropertyType { - // for properties to treat as a simple primitive - STRING = 0; // default - NUMBER = 1; - // for properties with special human-readable formatting - TIME = 10; - PEERID = 11; - MULTIADDR = 12; - // for complex structures like nested arrays, object trees etc - JSON = 90; - } - // property name of content e.g. 
openTs - string name = 1; - // type to interpret content value as - PropertyType type = 2; - // if true, expect an array of values of `type`; else, singular - bool has_multiple = 3; - } - - // name of event type, e.g. PeerConnecting - string name = 1; - // for runtime, send property_types for all events already seen in events list - // for events, only send property_types in the first event of a type not in runtime - repeated EventProperty property_types = 2; -} - -// Runtime encapsulates runtime info about a node. -message Runtime { - // e.g. go-libp2p, js-libp2p, rust-libp2p, etc. - string implementation = 1; - // e.g. 1.2.3. - string version = 2; - // e.g. Windows, Unix, macOS, Chrome, Mozilla, etc. - string platform = 3; - // our peer id - the peer id of the host system - string peer_id = 4; - // metadata describing configured event types - repeated EventType event_types = 7; -} - -// EndpointPair is a pair of multiaddrs. -message EndpointPair { - // the source multiaddr. - string src_multiaddr = 1; - // the destination multiaddr. - string dst_multiaddr = 2; -} - -// The status of a connection or stream. -enum Status { - ACTIVE = 0; - CLOSED = 1; - OPENING = 2; - CLOSING = 3; - ERROR = 4; -} - -// Our role in a connection or stream. -enum Role { - INITIATOR = 0; - RESPONDER = 1; -} - -// Traffic encloses data transfer statistics. -message Traffic { - // snapshot of the data in metrics. - DataGauge traffic_in = 1; - // snapshot of the data out metrics. - DataGauge traffic_out = 2; -} - -// a list of streams, by reference or inlined. -message StreamList { - // NOTE: only one of the next 2 fields can appear, but proto3 - // doesn't support combining oneof and repeated. - // - // streams within this connection by reference. - repeated bytes stream_ids = 1; - // streams within this connection by inlining. - repeated Stream streams = 2; -} - -// Connection reports metrics and state of a libp2p connection. 
-message Connection { - // Timeline contains the timestamps (ms since epoch) of the well-known milestones of a connection. - message Timeline { - // the instant when a connection was opened on the wire. - uint64 open_ts = 1; - // the instant when the upgrade process (handshake, security, multiplexing) finished. - uint64 upgraded_ts = 2; - // the instant when this connection was terminated. - uint64 close_ts = 3; - } - - // Attributes encapsulates the attributes of this connection. - message Attributes { - // the multiplexer being used. - string multiplexer = 1; - // the encryption method being used. - string encryption = 2; - } - - // the id of this connection, not to be shown in user tooling, - // used for (cross)referencing connections (e.g. relay). - bytes id = 1; - // the peer id of the other party. - string peer_id = 2; - // the status of this connection. - Status status = 3; - // a reference to the transport managing this connection. - bytes transport_id = 4; - // the endpoints participating in this connection. - EndpointPair endpoints = 5; - // the timeline of the connection, see Connection.Timeline. - Timeline timeline = 6; - // our role in this connection. - Role role = 7; - // traffic statistics. - Traffic traffic = 8; - // properties of this connection. - Attributes attribs = 9; - // the instantaneous latency of this connection in nanoseconds. - uint64 latency_ns = 10; - // streams within this connection. - StreamList streams = 11; - - reserved 12 to 15; - - // if this is a relayed connection, this points to the relaying connection. - // a default value here (empty bytes) indicates this is not a relayed connection. - oneof relayed_over { - bytes conn_id = 16; - Connection conn = 17; - } - // user provided tags. - repeated string user_provided_tags = 99; -} - -// Stream reports metrics and state of a libp2p stream. -message Stream { - message ConnectionRef { - oneof connection { - // the parent connection inlined. 
- Connection conn = 1; - // the parent connection by reference. - bytes conn_id = 2; - } - } - - // Timeline contains the timestamps (ms since epoch) of the well-known milestones of a stream. - message Timeline { - // the instant when the stream was opened. - uint64 open_ts = 1; - // the instant when the stream was terminated. - uint64 close_ts = 2; - } - - // the id of this stream, not to be shown in user tooling, - // used for (cross)referencing streams. - bytes id = 1; - // the protocol pinned to this stream. - string protocol = 2; - // our role in this stream. - Role role = 3; - // traffic statistics. - Traffic traffic = 4; - // the connection this stream is hosted under. - ConnectionRef conn = 5; - // the timeline of the stream, see Stream.Timeline. - Timeline timeline = 6; - // the status of this stream. - Status status = 7; - - // the instantaneous latency of this stream in nanoseconds. - // TODO: this is hard to calculate. - uint64 latency_ns = 16; - // user provided tags. - repeated string user_provided_tags = 99; -} - -// DHT metrics and state. -message DHT { - message Params { - // routing table bucket size. - uint64 k = 1; - // concurrency of asynchronous requests. - uint64 alpha = 2; - // number of disjoint paths to use. - uint64 disjoint_paths = 3; - // number of peers closest to a target that must have responded - // in order for a given query path to complete - uint64 beta = 4; - } - - // Peer in DHT - message PeerInDHT { - // The DHT's relationship with this peer - enum Status { - // Connected, in a bucket, ready to send/receive queries - ACTIVE = 0; - // Not currently connected, still "in" a bucket (e.g. temporarily disconnected) - MISSING = 1; - // Removed from a bucket or candidate list (e.g. 
connection lost or too slow) - REJECTED = 2; - // Was reachable when last checked, waiting to join a currently-full bucket - CANDIDATE = 3; - } - // the peer id of the host system - string peer_id = 1; - // the peer's status when data snapshot is taken - Status status = 2; - // age in bucket (ms) - uint32 age_in_bucket = 3; - } - - // A "k-bucket" containing peers of a certain kadamelia distance - message Bucket { - // CPL (Common Prefix Length) is the length of the common prefix - // between the ids of every peer in this bucket and the DHT peer id - uint32 cpl = 1; - // Peers associated with this bucket - repeated PeerInDHT peers = 2; - // Bucket may need more fields depending on WIP remodeling - } - - // Counters of query events, by status - message QueryGauge { - // Cumulative counter of queries with "SUCCESS" status - uint64 success = 1; - // Cumulative counter of queries with "ERROR" status - uint64 error = 2; - // Cumulative counter of queries with "TIMEOUT" status - uint64 timeout = 3; - } - - // DHT protocol name - string protocol = 1; - // protocol enabled. - bool enabled = 2; - // timestamp (ms since epoch) of start up. - uint64 start_ts = 3; - // params of the dht. - Params params = 4; - // existing, intantiated buckets and their contents - repeated Bucket buckets = 5; - // counts inbound queries received from other peers - QueryGauge incoming_queries = 6; - // counts outbound queries dispatched by this peer - QueryGauge outgoing_queries = 7; -} - -// Subsystems encapsulates all instrumented subsystems for a libp2p host. -message Subsystems { - // connections data, source agnostic but currently only supports the Swarm subsystem - repeated Connection connections = 1; - // the DHT subsystem. - DHT dht = 2; -} - -// Connections and streams output for a time interval is one of these. 
-message State { - // list of connections - Subsystems subsystems = 1; - // overall traffic for this peer - Traffic traffic = 2; - // moment this data snapshot and instantaneous values were taken - uint64 instant_ts = 3; - // start of included data collection (cumulative values counted from here) - uint64 start_ts = 4; - // length of time up to instant_ts covered by this data snapshot - uint32 snapshot_duration_ms = 5; -} - -// Event -message Event { - // definition of event type, containing only `name` unless this is first encounter of novel event - EventType type = 1; - // time this event occurred (ms since epoch) - uint64 ts = 2; - // stringified json; top-level keys and value types match EventProperty definitions - string content = 3; -} - -// ServerMessage wraps messages to be sent to clients to allow extension -// based on new types of data sources -message ServerMessage { - // Version of this protobuf. - Version version = 1; - // The payload this message contains. - oneof payload { - State state = 2; - Runtime runtime = 3; - Event event = 4; - - CommandResponse response = 5; - ServerNotice notice = 6; - } -} - -// Configuration encapsulates configuration fields for the protocol and commands. -message Configuration { - uint64 retention_period_ms = 1; - uint64 state_snapshot_interval_ms = 2; -} - -// ClientCommand is a command sent from the client to the server. -message ClientCommand { - enum Source { - STATE = 0; // full state snapshot. - RUNTIME = 1; // runtime data message. - EVENTS = 2; // eventbus events. - } - - enum Command { - // HELLO is the first command that a client must send to greet the server. - // Connections that do not respect this invariant will be terminated. - HELLO = 0; - - // REQUEST is applicable to STATE and RUNTIME sources. - REQUEST = 1; - - // PUSH streams can only be started for STATE and EVENTS sources. - PUSH_ENABLE = 2; // enables pushing for a given source. - PUSH_DISABLE = 3; // disables pushing for a given source. 
- PUSH_PAUSE = 4; // pauses pushing for all sources. - PUSH_RESUME = 5; // resumes pushing for all sources. - - // UPDATE_CONFIG requests a configuration update. The config field is - // compulsory. - // - // The server reserves the right to override the requested values, and - // will return the effective configuration in the response. - UPDATE_CONFIG = 7; - } - - Version version = 1; - uint64 id = 2; // a unique ID for this request. - Command command = 3; - Source source = 4; - Configuration config = 5; -} - -// CommandResponse is a response to a command sent by the client. -message CommandResponse { - enum Result { - OK = 0; - ERR = 1; - } - - uint64 id = 1; // for correlation with the request. - Result result = 2; - string error = 3; - - // effective_config is the effective configuration the server holds for - // this connection. It is returned in response to HELLO and UPDATE_CONFIG - // commands. - Configuration effective_config = 4; -} - -// ServerNotice represents a NOTICE sent from the server to the client. -message ServerNotice { - enum Kind { - DISCARDING_EVENTS = 0; - } - Kind kind = 1; -} \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p/core/network/conn.go b/vendor/github.com/libp2p/go-libp2p/core/network/conn.go index 6d16d298..3be8cb0d 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/network/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/core/network/conn.go @@ -6,6 +6,7 @@ import ( ic "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" ma "github.com/multiformats/go-multiaddr" ) @@ -32,16 +33,22 @@ type Conn interface { // GetStreams returns all open streams over this conn. GetStreams() []Stream + + // IsClosed returns whether a connection is fully closed, so it can + // be garbage collected. + IsClosed() bool } // ConnectionState holds information about the connection. 
type ConnectionState struct { // The stream multiplexer used on this connection (if any). For example: /yamux/1.0.0 - StreamMultiplexer string + StreamMultiplexer protocol.ID // The security protocol used on this connection (if any). For example: /tls/1.0.0 - Security string + Security protocol.ID // the transport used on this connection. For example: tcp Transport string + // indicates whether StreamMultiplexer was selected using inlined muxer negotiation + UsedEarlyMuxerNegotiation bool } // ConnSecurity is the interface that one can mix into a connection interface to @@ -50,9 +57,6 @@ type ConnSecurity interface { // LocalPeer returns our peer ID LocalPeer() peer.ID - // LocalPrivateKey returns our private key - LocalPrivateKey() ic.PrivKey - // RemotePeer returns the peer ID of the remote peer. RemotePeer() peer.ID diff --git a/vendor/github.com/libp2p/go-libp2p/core/network/network.go b/vendor/github.com/libp2p/go-libp2p/core/network/network.go index 0beaac0f..47908b8e 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/network/network.go +++ b/vendor/github.com/libp2p/go-libp2p/core/network/network.go @@ -6,8 +6,10 @@ package network import ( + "bytes" "context" "io" + "sort" "time" "github.com/libp2p/go-libp2p/core/peer" @@ -184,3 +186,23 @@ type Dialer interface { Notify(Notifiee) StopNotify(Notifiee) } + +// DedupAddrs deduplicates addresses in place, leave only unique addresses. +// It doesn't allocate. 
+func DedupAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { + if len(addrs) == 0 { + return addrs + } + sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) < 0 }) + idx := 1 + for i := 1; i < len(addrs); i++ { + if !addrs[i-1].Equal(addrs[i]) { + addrs[idx] = addrs[i] + idx++ + } + } + for i := idx; i < len(addrs); i++ { + addrs[i] = nil + } + return addrs[:idx] +} diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile deleted file mode 100644 index 7cf8222f..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go index 36040c3c..2aa8a07a 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/peer/pb/peer_record.pb.go @@ -1,27 +1,24 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: peer_record.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/peer_record.proto -package peer_pb +package pb import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - proto "github.com/gogo/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // PeerRecord messages contain information that is useful to share with other peers. // Currently, a PeerRecord contains the public listen addresses for a peer, but this @@ -32,6 +29,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // See https://github.com/libp2p/go-libp2p/core/record/pb/envelope.proto for // the SignedEnvelope definition. type PeerRecord struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // peer_id contains a libp2p peer id in its binary representation. PeerId []byte `protobuf:"bytes,1,opt,name=peer_id,json=peerId,proto3" json:"peer_id,omitempty"` // seq contains a monotonically-increasing sequence counter to order PeerRecords in time. 
@@ -40,56 +41,55 @@ type PeerRecord struct { Addresses []*PeerRecord_AddressInfo `protobuf:"bytes,3,rep,name=addresses,proto3" json:"addresses,omitempty"` } -func (m *PeerRecord) Reset() { *m = PeerRecord{} } -func (m *PeerRecord) String() string { return proto.CompactTextString(m) } -func (*PeerRecord) ProtoMessage() {} -func (*PeerRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_dc0d8059ab0ad14d, []int{0} +func (x *PeerRecord) Reset() { + *x = PeerRecord{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_peer_record_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PeerRecord) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *PeerRecord) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PeerRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PeerRecord.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (*PeerRecord) ProtoMessage() {} + +func (x *PeerRecord) ProtoReflect() protoreflect.Message { + mi := &file_pb_peer_record_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *PeerRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerRecord.Merge(m, src) -} -func (m *PeerRecord) XXX_Size() int { - return m.Size() -} -func (m *PeerRecord) XXX_DiscardUnknown() { - xxx_messageInfo_PeerRecord.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_PeerRecord proto.InternalMessageInfo +// Deprecated: Use PeerRecord.ProtoReflect.Descriptor instead. 
+func (*PeerRecord) Descriptor() ([]byte, []int) { + return file_pb_peer_record_proto_rawDescGZIP(), []int{0} +} -func (m *PeerRecord) GetPeerId() []byte { - if m != nil { - return m.PeerId +func (x *PeerRecord) GetPeerId() []byte { + if x != nil { + return x.PeerId } return nil } -func (m *PeerRecord) GetSeq() uint64 { - if m != nil { - return m.Seq +func (x *PeerRecord) GetSeq() uint64 { + if x != nil { + return x.Seq } return 0 } -func (m *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo { - if m != nil { - return m.Addresses +func (x *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo { + if x != nil { + return x.Addresses } return nil } @@ -97,511 +97,143 @@ func (m *PeerRecord) GetAddresses() []*PeerRecord_AddressInfo { // AddressInfo is a wrapper around a binary multiaddr. It is defined as a // separate message to allow us to add per-address metadata in the future. type PeerRecord_AddressInfo struct { - Multiaddr []byte `protobuf:"bytes,1,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"` -} - -func (m *PeerRecord_AddressInfo) Reset() { *m = PeerRecord_AddressInfo{} } -func (m *PeerRecord_AddressInfo) String() string { return proto.CompactTextString(m) } -func (*PeerRecord_AddressInfo) ProtoMessage() {} -func (*PeerRecord_AddressInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_dc0d8059ab0ad14d, []int{0, 0} -} -func (m *PeerRecord_AddressInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PeerRecord_AddressInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PeerRecord_AddressInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PeerRecord_AddressInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerRecord_AddressInfo.Merge(m, src) -} -func (m *PeerRecord_AddressInfo) XXX_Size() int { - return m.Size() -} -func (m 
*PeerRecord_AddressInfo) XXX_DiscardUnknown() { - xxx_messageInfo_PeerRecord_AddressInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_PeerRecord_AddressInfo proto.InternalMessageInfo - -func (m *PeerRecord_AddressInfo) GetMultiaddr() []byte { - if m != nil { - return m.Multiaddr - } - return nil -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func init() { - proto.RegisterType((*PeerRecord)(nil), "peer.pb.PeerRecord") - proto.RegisterType((*PeerRecord_AddressInfo)(nil), "peer.pb.PeerRecord.AddressInfo") -} - -func init() { proto.RegisterFile("peer_record.proto", fileDescriptor_dc0d8059ab0ad14d) } - -var fileDescriptor_dc0d8059ab0ad14d = []byte{ - // 189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x48, 0x4d, 0x2d, - 0x8a, 0x2f, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, - 0x09, 0xe9, 0x15, 0x24, 0x29, 0x2d, 0x66, 0xe4, 0xe2, 0x0a, 0x48, 0x4d, 0x2d, 0x0a, 0x02, 0xcb, - 0x0a, 0x89, 0x73, 0x81, 0x65, 0xe2, 0x33, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0xd8, - 0x40, 0x5c, 0xcf, 0x14, 0x21, 0x01, 0x2e, 0xe6, 0xe2, 0xd4, 0x42, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0x96, 0x20, 0x10, 0x53, 0xc8, 0x96, 0x8b, 0x33, 0x31, 0x25, 0xa5, 0x28, 0xb5, 0xb8, 0x38, 0xb5, - 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x5e, 0x0f, 0x6a, 0xac, 0x1e, 0xc2, 0x48, 0x3d, - 0x47, 0x88, 0x22, 0xcf, 0xbc, 0xb4, 0xfc, 0x20, 0x84, 0x0e, 0x29, 0x6d, 0x2e, 0x6e, 0x24, 0x19, - 0x21, 0x19, 0x2e, 0xce, 0xdc, 0xd2, 0x9c, 0x92, 0x4c, 0x90, 0x02, 0xa8, 0xd5, 0x08, 0x01, 0x27, - 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, - 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xfb, 0xc7, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x99, 0x56, 0x19, 0xe4, 0x00, 0x00, 0x00, -} - -func (m *PeerRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() 
- dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PeerRecord) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PeerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPeerRecord(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Seq != 0 { - i = encodeVarintPeerRecord(dAtA, i, uint64(m.Seq)) - i-- - dAtA[i] = 0x10 - } - if len(m.PeerId) > 0 { - i -= len(m.PeerId) - copy(dAtA[i:], m.PeerId) - i = encodeVarintPeerRecord(dAtA, i, uint64(len(m.PeerId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + Multiaddr []byte `protobuf:"bytes,1,opt,name=multiaddr,proto3" json:"multiaddr,omitempty"` } -func (m *PeerRecord_AddressInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *PeerRecord_AddressInfo) Reset() { + *x = PeerRecord_AddressInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_peer_record_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PeerRecord_AddressInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *PeerRecord_AddressInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PeerRecord_AddressInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Multiaddr) > 0 { - i -= len(m.Multiaddr) - copy(dAtA[i:], m.Multiaddr) - i = 
encodeVarintPeerRecord(dAtA, i, uint64(len(m.Multiaddr))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*PeerRecord_AddressInfo) ProtoMessage() {} -func encodeVarintPeerRecord(dAtA []byte, offset int, v uint64) int { - offset -= sovPeerRecord(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PeerRecord) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerId) - if l > 0 { - n += 1 + l + sovPeerRecord(uint64(l)) - } - if m.Seq != 0 { - n += 1 + sovPeerRecord(uint64(m.Seq)) - } - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovPeerRecord(uint64(l)) +func (x *PeerRecord_AddressInfo) ProtoReflect() protoreflect.Message { + mi := &file_pb_peer_record_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return n + return mi.MessageOf(x) } -func (m *PeerRecord_AddressInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Multiaddr) - if l > 0 { - n += 1 + l + sovPeerRecord(uint64(l)) - } - return n -} - -func sovPeerRecord(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPeerRecord(x uint64) (n int) { - return sovPeerRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +// Deprecated: Use PeerRecord_AddressInfo.ProtoReflect.Descriptor instead. 
+func (*PeerRecord_AddressInfo) Descriptor() ([]byte, []int) { + return file_pb_peer_record_proto_rawDescGZIP(), []int{0, 0} } -func (m *PeerRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerId", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPeerRecord - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPeerRecord - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerId = append(m.PeerId[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerId == nil { - m.PeerId = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seq", wireType) - } - m.Seq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Seq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPeerRecord - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPeerRecord - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, &PeerRecord_AddressInfo{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPeerRecord(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPeerRecord - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPeerRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *PeerRecord_AddressInfo) GetMultiaddr() []byte { + if x != nil { + return x.Multiaddr } return nil } -func (m *PeerRecord_AddressInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddressInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddressInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Multiaddr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPeerRecord - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPeerRecord - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Multiaddr = append(m.Multiaddr[:0], dAtA[iNdEx:postIndex]...) 
- if m.Multiaddr == nil { - m.Multiaddr = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPeerRecord(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPeerRecord - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPeerRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPeerRecord(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPeerRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPeerRecord - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPeerRecord - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPeerRecord - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var File_pb_peer_record_proto protoreflect.FileDescriptor + +var file_pb_peer_record_proto_rawDesc = []byte{ + 0x0a, 0x14, 0x70, 0x62, 0x2f, 0x70, 0x65, 
0x65, 0x72, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x70, 0x65, 0x65, 0x72, 0x2e, 0x70, 0x62, 0x22, + 0xa3, 0x01, 0x0a, 0x0a, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x65, 0x71, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x65, 0x71, 0x12, 0x3d, 0x0a, 0x09, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, + 0x65, 0x65, 0x72, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x1a, 0x2b, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x61, 0x64, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x61, 0x64, 0x64, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthPeerRecord = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPeerRecord = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPeerRecord = fmt.Errorf("proto: unexpected end of group") + file_pb_peer_record_proto_rawDescOnce sync.Once + file_pb_peer_record_proto_rawDescData = file_pb_peer_record_proto_rawDesc ) + +func file_pb_peer_record_proto_rawDescGZIP() []byte { + file_pb_peer_record_proto_rawDescOnce.Do(func() { + file_pb_peer_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_peer_record_proto_rawDescData) + }) + return file_pb_peer_record_proto_rawDescData +} + +var file_pb_peer_record_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pb_peer_record_proto_goTypes = []interface{}{ + 
(*PeerRecord)(nil), // 0: peer.pb.PeerRecord + (*PeerRecord_AddressInfo)(nil), // 1: peer.pb.PeerRecord.AddressInfo +} +var file_pb_peer_record_proto_depIdxs = []int32{ + 1, // 0: peer.pb.PeerRecord.addresses:type_name -> peer.pb.PeerRecord.AddressInfo + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pb_peer_record_proto_init() } +func file_pb_peer_record_proto_init() { + if File_pb_peer_record_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_peer_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerRecord); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_peer_record_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PeerRecord_AddressInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_peer_record_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_peer_record_proto_goTypes, + DependencyIndexes: file_pb_peer_record_proto_depIdxs, + MessageInfos: file_pb_peer_record_proto_msgTypes, + }.Build() + File_pb_peer_record_proto = out.File + file_pb_peer_record_proto_rawDesc = nil + file_pb_peer_record_proto_goTypes = nil + file_pb_peer_record_proto_depIdxs = nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go b/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go index e3ac3f2c..5fd1cd50 100644 --- 
a/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go +++ b/vendor/github.com/libp2p/go-libp2p/core/peer/peer_serde.go @@ -40,8 +40,6 @@ func (id *ID) UnmarshalBinary(data []byte) error { return id.Unmarshal(data) } -// Size implements Gogo's proto.Sizer, but we omit the compile-time assertion to avoid introducing a hard -// dependency on gogo. func (id ID) Size() int { return len([]byte(id)) } diff --git a/vendor/github.com/libp2p/go-libp2p/core/peer/record.go b/vendor/github.com/libp2p/go-libp2p/core/peer/record.go index b502c8dd..0fc7e552 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/peer/record.go +++ b/vendor/github.com/libp2p/go-libp2p/core/peer/record.go @@ -6,14 +6,16 @@ import ( "time" "github.com/libp2p/go-libp2p/core/internal/catch" - pb "github.com/libp2p/go-libp2p/core/peer/pb" + "github.com/libp2p/go-libp2p/core/peer/pb" "github.com/libp2p/go-libp2p/core/record" ma "github.com/multiformats/go-multiaddr" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" ) +//go:generate protoc --proto_path=$PWD:$PWD/../.. --go_out=. --go_opt=Mpb/peer_record.proto=./pb pb/peer_record.proto + var _ record.Record = (*PeerRecord)(nil) func init() { diff --git a/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go b/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go index 7561f32b..b63582af 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go +++ b/vendor/github.com/libp2p/go-libp2p/core/peerstore/peerstore.go @@ -11,6 +11,7 @@ import ( ic "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" ma "github.com/multiformats/go-multiaddr" @@ -230,19 +231,19 @@ type Metrics interface { // ProtoBook tracks the protocols supported by peers. 
type ProtoBook interface { - GetProtocols(peer.ID) ([]string, error) - AddProtocols(peer.ID, ...string) error - SetProtocols(peer.ID, ...string) error - RemoveProtocols(peer.ID, ...string) error + GetProtocols(peer.ID) ([]protocol.ID, error) + AddProtocols(peer.ID, ...protocol.ID) error + SetProtocols(peer.ID, ...protocol.ID) error + RemoveProtocols(peer.ID, ...protocol.ID) error // SupportsProtocols returns the set of protocols the peer supports from among the given protocols. // If the returned error is not nil, the result is indeterminate. - SupportsProtocols(peer.ID, ...string) ([]string, error) + SupportsProtocols(peer.ID, ...protocol.ID) ([]protocol.ID, error) // FirstSupportedProtocol returns the first protocol that the peer supports among the given protocols. - // If the peer does not support any of the given protocols, this function will return an empty string and a nil error. + // If the peer does not support any of the given protocols, this function will return an empty protocol.ID and a nil error. // If the returned error is not nil, the result is indeterminate. - FirstSupportedProtocol(peer.ID, ...string) (string, error) + FirstSupportedProtocol(peer.ID, ...protocol.ID) (protocol.ID, error) // RemovePeer removes all protocols associated with a peer. RemovePeer(peer.ID) diff --git a/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go b/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go index f839e016..683ef56f 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go +++ b/vendor/github.com/libp2p/go-libp2p/core/protocol/switch.go @@ -3,6 +3,8 @@ package protocol import ( "io" + + "github.com/multiformats/go-multistream" ) // HandlerFunc is a user-provided function used by the Router to @@ -11,7 +13,7 @@ import ( // Will be invoked with the protocol ID string as the first argument, // which may differ from the ID used for registration if the handler // was registered using a match function. 
-type HandlerFunc = func(protocol string, rwc io.ReadWriteCloser) error +type HandlerFunc = multistream.HandlerFunc[ID] // Router is an interface that allows users to add and remove protocol handlers, // which will be invoked when incoming stream requests for registered protocols @@ -25,7 +27,7 @@ type Router interface { // AddHandler registers the given handler to be invoked for // an exact literal match of the given protocol ID string. - AddHandler(protocol string, handler HandlerFunc) + AddHandler(protocol ID, handler HandlerFunc) // AddHandlerWithFunc registers the given handler to be invoked // when the provided match function returns true. @@ -35,17 +37,17 @@ type Router interface { // the protocol. Note that the protocol ID argument is not // used for matching; if you want to match the protocol ID // string exactly, you must check for it in your match function. - AddHandlerWithFunc(protocol string, match func(string) bool, handler HandlerFunc) + AddHandlerWithFunc(protocol ID, match func(ID) bool, handler HandlerFunc) // RemoveHandler removes the registered handler (if any) for the // given protocol ID string. - RemoveHandler(protocol string) + RemoveHandler(protocol ID) // Protocols returns a list of all registered protocol ID strings. // Note that the Router may be able to handle protocol IDs not // included in this list if handlers were added with match functions // using AddHandlerWithFunc. - Protocols() []string + Protocols() []ID } // Negotiator is a component capable of reaching agreement over what protocols @@ -55,7 +57,7 @@ type Negotiator interface { // inbound stream, returning after the protocol has been determined and the // Negotiator has finished using the stream for negotiation. Returns an // error if negotiation fails. 
- Negotiate(rwc io.ReadWriteCloser) (string, HandlerFunc, error) + Negotiate(rwc io.ReadWriteCloser) (ID, HandlerFunc, error) // Handle calls Negotiate to determine which protocol handler to use for an // inbound stream, then invokes the protocol handler function, passing it diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go b/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go index 38811994..86ad1425 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go +++ b/vendor/github.com/libp2p/go-libp2p/core/record/envelope.go @@ -8,14 +8,16 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/internal/catch" - pb "github.com/libp2p/go-libp2p/core/record/pb" + "github.com/libp2p/go-libp2p/core/record/pb" pool "github.com/libp2p/go-buffer-pool" - "github.com/gogo/protobuf/proto" "github.com/multiformats/go-varint" + "google.golang.org/protobuf/proto" ) +//go:generate protoc --proto_path=$PWD:$PWD/../.. --go_out=. --go_opt=Mpb/envelope.proto=./pb pb/envelope.proto + // Envelope contains an arbitrary []byte payload, signed by a libp2p peer. // // Envelopes are signed in the context of a particular "domain", which is a @@ -104,11 +106,6 @@ func Seal(rec Record, privateKey crypto.PrivKey) (*Envelope, error) { // doSomethingWithPeerRecord(peerRec) // } // -// Important: you MUST check the error value before using the returned Envelope. In some error -// cases, including when the envelope signature is invalid, both the Envelope and an error will -// be returned. This allows you to inspect the unmarshalled but invalid Envelope. As a result, -// you must not assume that any non-nil Envelope returned from this function is valid. -// // If the Envelope signature is valid, but no Record type is registered for the Envelope's // PayloadType, ErrPayloadTypeNotRegistered will be returned, along with the Envelope and // a nil Record. 
@@ -120,12 +117,12 @@ func ConsumeEnvelope(data []byte, domain string) (envelope *Envelope, rec Record err = e.validate(domain) if err != nil { - return e, nil, fmt.Errorf("failed to validate envelope: %w", err) + return nil, nil, fmt.Errorf("failed to validate envelope: %w", err) } rec, err = e.Record() if err != nil { - return e, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err) + return nil, nil, fmt.Errorf("failed to unmarshal envelope payload: %w", err) } return e, rec, nil } diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile deleted file mode 100644 index 7cf8222f..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go index a9990217..1d3c7f25 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.pb.go @@ -1,28 +1,25 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: envelope.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/envelope.proto -package record_pb +package pb import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - proto "github.com/gogo/protobuf/proto" pb "github.com/libp2p/go-libp2p/core/crypto/pb" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Envelope encloses a signed payload produced by a peer, along with the public // key of the keypair it was signed with so that it can be statelessly validated @@ -32,6 +29,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // can be deserialized deterministically. Often, this byte string is a // multicodec. type Envelope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // public_key is the public key of the keypair the enclosed payload was // signed with. 
PublicKey *pb.PublicKey `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` @@ -46,460 +47,146 @@ type Envelope struct { Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` } -func (m *Envelope) Reset() { *m = Envelope{} } -func (m *Envelope) String() string { return proto.CompactTextString(m) } -func (*Envelope) ProtoMessage() {} -func (*Envelope) Descriptor() ([]byte, []int) { - return fileDescriptor_ee266e8c558e9dc5, []int{0} -} -func (m *Envelope) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Envelope) Reset() { + *x = Envelope{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_envelope_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Envelope) XXX_Merge(src proto.Message) { - xxx_messageInfo_Envelope.Merge(m, src) -} -func (m *Envelope) XXX_Size() int { - return m.Size() -} -func (m *Envelope) XXX_DiscardUnknown() { - xxx_messageInfo_Envelope.DiscardUnknown(m) + +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Envelope proto.InternalMessageInfo +func (*Envelope) ProtoMessage() {} -func (m *Envelope) GetPublicKey() *pb.PublicKey { - if m != nil { - return m.PublicKey +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_pb_envelope_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (m *Envelope) GetPayloadType() []byte { - if m != nil { - return 
m.PayloadType - } - return nil +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. +func (*Envelope) Descriptor() ([]byte, []int) { + return file_pb_envelope_proto_rawDescGZIP(), []int{0} } -func (m *Envelope) GetPayload() []byte { - if m != nil { - return m.Payload +func (x *Envelope) GetPublicKey() *pb.PublicKey { + if x != nil { + return x.PublicKey } return nil } -func (m *Envelope) GetSignature() []byte { - if m != nil { - return m.Signature +func (x *Envelope) GetPayloadType() []byte { + if x != nil { + return x.PayloadType } return nil } -func init() { - proto.RegisterType((*Envelope)(nil), "record.pb.Envelope") -} - -func init() { proto.RegisterFile("envelope.proto", fileDescriptor_ee266e8c558e9dc5) } - -var fileDescriptor_ee266e8c558e9dc5 = []byte{ - // 205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcd, 0x2b, 0x4b, - 0xcd, 0xc9, 0x2f, 0x48, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2c, 0x4a, 0x4d, 0xce, - 0x2f, 0x4a, 0xd1, 0x2b, 0x48, 0x92, 0x12, 0x4b, 0x2e, 0xaa, 0x2c, 0x28, 0xc9, 0xd7, 0x2f, 0x48, - 0xd2, 0x87, 0xb0, 0x20, 0x4a, 0x94, 0x66, 0x31, 0x72, 0x71, 0xb8, 0x42, 0x75, 0x09, 0x19, 0x73, - 0x71, 0x15, 0x94, 0x26, 0xe5, 0x64, 0x26, 0xc7, 0x67, 0xa7, 0x56, 0x4a, 0x30, 0x2a, 0x30, 0x6a, - 0x70, 0x1b, 0x89, 0xe8, 0xc1, 0xd4, 0x27, 0xe9, 0x05, 0x80, 0x25, 0xbd, 0x53, 0x2b, 0x83, 0x38, - 0x0b, 0x60, 0x4c, 0x21, 0x45, 0x2e, 0x9e, 0x82, 0xc4, 0xca, 0x9c, 0xfc, 0xc4, 0x94, 0xf8, 0x92, - 0xca, 0x82, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x6e, 0xa8, 0x58, 0x48, 0x65, 0x41, - 0xaa, 0x90, 0x04, 0x17, 0x3b, 0x94, 0x2b, 0xc1, 0x0c, 0x96, 0x85, 0x71, 0x85, 0x64, 0xb8, 0x38, - 0x8b, 0x33, 0xd3, 0xf3, 0x12, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x58, 0xc1, 0x72, 0x08, 0x01, 0x27, - 0x89, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, - 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x48, 0x62, 0x03, 0xbb, 0xde, 
0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x0b, 0xd9, 0x6d, 0xf2, 0x00, 0x00, 0x00, -} - -func (m *Envelope) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Envelope) GetPayload() []byte { + if x != nil { + return x.Payload } - return dAtA[:n], nil -} - -func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signature) > 0 { - i -= len(m.Signature) - copy(dAtA[i:], m.Signature) - i = encodeVarintEnvelope(dAtA, i, uint64(len(m.Signature))) - i-- - dAtA[i] = 0x2a +func (x *Envelope) GetSignature() []byte { + if x != nil { + return x.Signature } - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarintEnvelope(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0x1a - } - if len(m.PayloadType) > 0 { - i -= len(m.PayloadType) - copy(dAtA[i:], m.PayloadType) - i = encodeVarintEnvelope(dAtA, i, uint64(len(m.PayloadType))) - i-- - dAtA[i] = 0x12 - } - if m.PublicKey != nil { - { - size, err := m.PublicKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEnvelope(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return nil } -func encodeVarintEnvelope(dAtA []byte, offset int, v uint64) int { - offset -= sovEnvelope(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Envelope) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PublicKey != nil { - l = m.PublicKey.Size() - n += 1 + l + sovEnvelope(uint64(l)) - } - l = len(m.PayloadType) - if l > 0 { - n += 1 + l + sovEnvelope(uint64(l)) - } - l = 
len(m.Payload) - if l > 0 { - n += 1 + l + sovEnvelope(uint64(l)) - } - l = len(m.Signature) - if l > 0 { - n += 1 + l + sovEnvelope(uint64(l)) - } - return n -} +var File_pb_envelope_proto protoreflect.FileDescriptor -func sovEnvelope(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEnvelope(x uint64) (n int) { - return sovEnvelope(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +var file_pb_envelope_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x70, 0x62, 0x2f, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x62, 0x1a, 0x1b, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x08, + 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x0a, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, + 0x0c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *Envelope) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Envelope: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEnvelope - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEnvelope - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PublicKey == nil { - m.PublicKey = &pb.PublicKey{} - } - if err := m.PublicKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PayloadType", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthEnvelope - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthEnvelope - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PayloadType = append(m.PayloadType[:0], dAtA[iNdEx:postIndex]...) 
- if m.PayloadType == nil { - m.PayloadType = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthEnvelope - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthEnvelope - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) - if m.Payload == nil { - m.Payload = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signature", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthEnvelope - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthEnvelope - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signature = append(m.Signature[:0], dAtA[iNdEx:postIndex]...) 
- if m.Signature == nil { - m.Signature = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEnvelope(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEnvelope - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthEnvelope - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEnvelope(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEnvelope - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEnvelope - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEnvelope - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEnvelope - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEnvelope +var ( + file_pb_envelope_proto_rawDescOnce sync.Once + file_pb_envelope_proto_rawDescData = file_pb_envelope_proto_rawDesc +) + +func file_pb_envelope_proto_rawDescGZIP() []byte { + file_pb_envelope_proto_rawDescOnce.Do(func() { + file_pb_envelope_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_envelope_proto_rawDescData) + }) + return file_pb_envelope_proto_rawDescData +} + +var file_pb_envelope_proto_msgTypes = 
make([]protoimpl.MessageInfo, 1) +var file_pb_envelope_proto_goTypes = []interface{}{ + (*Envelope)(nil), // 0: record.pb.Envelope + (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey +} +var file_pb_envelope_proto_depIdxs = []int32{ + 1, // 0: record.pb.Envelope.public_key:type_name -> crypto.pb.PublicKey + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pb_envelope_proto_init() } +func file_pb_envelope_proto_init() { + if File_pb_envelope_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_envelope_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Envelope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEnvelope - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_envelope_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_envelope_proto_goTypes, + DependencyIndexes: file_pb_envelope_proto_depIdxs, + MessageInfos: file_pb_envelope_proto_msgTypes, + }.Build() + File_pb_envelope_proto = out.File + file_pb_envelope_proto_rawDesc = nil + file_pb_envelope_proto_goTypes = nil + file_pb_envelope_proto_depIdxs = nil } - -var ( - ErrInvalidLengthEnvelope = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEnvelope = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEnvelope = 
fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto index ca3555fb..05071ccd 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto +++ b/vendor/github.com/libp2p/go-libp2p/core/record/pb/envelope.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package record.pb; -import "crypto/pb/crypto.proto"; +import "core/crypto/pb/crypto.proto"; // Envelope encloses a signed payload produced by a peer, along with the public // key of the keypair it was signed with so that it can be statelessly validated diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go index d2487a3b..9ed20f09 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go +++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/insecure.go @@ -14,11 +14,15 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/sec" - pb "github.com/libp2p/go-libp2p/core/sec/insecure/pb" + "github.com/libp2p/go-libp2p/core/sec/insecure/pb" "github.com/libp2p/go-msgio" + + "google.golang.org/protobuf/proto" ) +//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/plaintext.proto=./pb pb/plaintext.proto + // ID is the multistream-select protocol ID that should be used when identifying // this security transport. const ID = "/plaintext/2.0.0" @@ -36,9 +40,7 @@ type Transport struct { var _ sec.SecureTransport = &Transport{} -// NewWithIdentity constructs a new insecure transport. The provided private key -// is stored and returned from LocalPrivateKey to satisfy the -// SecureTransport interface, and the public key is sent to +// NewWithIdentity constructs a new insecure transport. The public key is sent to // remote peers. No security is provided. 
func NewWithIdentity(protocolID protocol.ID, id peer.ID, key ci.PrivKey) *Transport { return &Transport{ @@ -53,12 +55,6 @@ func (t *Transport) LocalPeer() peer.ID { return t.id } -// LocalPrivateKey returns the local private key. -// This key is used only for identity generation and provides no security. -func (t *Transport) LocalPrivateKey() ci.PrivKey { - return t.key -} - // SecureInbound *pretends to secure* an inbound connection to the given peer. // It sends the local peer's ID and public key, and receives the same from the remote peer. // No validation is performed as to the authenticity or ownership of the provided public key, @@ -66,19 +62,18 @@ func (t *Transport) LocalPrivateKey() ci.PrivKey { // // SecureInbound may fail if the remote peer sends an ID and public key that are inconsistent // with each other, or if a network error occurs during the ID exchange. -func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) { +func (t *Transport) SecureInbound(_ context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) { conn := &Conn{ - Conn: insecure, - local: t.id, - localPrivKey: t.key, + Conn: insecure, + local: t.id, + localPubKey: t.key.GetPublic(), } - err := conn.runHandshakeSync() - if err != nil { + if err := conn.runHandshakeSync(); err != nil { return nil, err } - if t.key != nil && p != "" && p != conn.remote { + if p != "" && p != conn.remote { return nil, fmt.Errorf("remote peer sent unexpected peer ID. expected=%s received=%s", p, conn.remote) } @@ -93,19 +88,18 @@ func (t *Transport) SecureInbound(ctx context.Context, insecure net.Conn, p peer // SecureOutbound may fail if the remote peer sends an ID and public key that are inconsistent // with each other, or if the ID sent by the remote peer does not match the one dialed. It may // also fail if a network error occurs during the ID exchange. 
-func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) { +func (t *Transport) SecureOutbound(_ context.Context, insecure net.Conn, p peer.ID) (sec.SecureConn, error) { conn := &Conn{ - Conn: insecure, - local: t.id, - localPrivKey: t.key, + Conn: insecure, + local: t.id, + localPubKey: t.key.GetPublic(), } - err := conn.runHandshakeSync() - if err != nil { + if err := conn.runHandshakeSync(); err != nil { return nil, err } - if t.key != nil && p != conn.remote { + if p != conn.remote { return nil, fmt.Errorf("remote peer sent unexpected peer ID. expected=%s received=%s", p, conn.remote) } @@ -113,19 +107,14 @@ func (t *Transport) SecureOutbound(ctx context.Context, insecure net.Conn, p pee return conn, nil } -func (t *Transport) ID() protocol.ID { - return t.protocolID -} +func (t *Transport) ID() protocol.ID { return t.protocolID } // Conn is the connection type returned by the insecure transport. type Conn struct { net.Conn - local peer.ID - remote peer.ID - - localPrivKey ci.PrivKey - remotePubKey ci.PubKey + local, remote peer.ID + localPubKey, remotePubKey ci.PubKey } func makeExchangeMessage(pubkey ci.PubKey) (*pb.Exchange, error) { @@ -146,12 +135,12 @@ func makeExchangeMessage(pubkey ci.PubKey) (*pb.Exchange, error) { func (ic *Conn) runHandshakeSync() error { // If we were initialized without keys, behave as in plaintext/1.0.0 (do nothing) - if ic.localPrivKey == nil { + if ic.localPubKey == nil { return nil } // Generate an Exchange message - msg, err := makeExchangeMessage(ic.localPrivKey.GetPublic()) + msg, err := makeExchangeMessage(ic.localPubKey) if err != nil { return err } @@ -190,7 +179,7 @@ func (ic *Conn) runHandshakeSync() error { func readWriteMsg(rw io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) { const maxMessageSize = 1 << 16 - outBytes, err := out.Marshal() + outBytes, err := proto.Marshal(out) if err != nil { return nil, err } @@ -201,7 +190,7 @@ func readWriteMsg(rw 
io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) { }() r := msgio.NewVarintReaderSize(rw, maxMessageSize) - msg, err1 := r.ReadMsg() + b, err1 := r.ReadMsg() // Always wait for the read to finish. err2 := <-wresult @@ -210,11 +199,11 @@ func readWriteMsg(rw io.ReadWriter, out *pb.Exchange) (*pb.Exchange, error) { return nil, err1 } if err2 != nil { - r.ReleaseMsg(msg) + r.ReleaseMsg(b) return nil, err2 } inMsg := new(pb.Exchange) - err = inMsg.Unmarshal(msg) + err = proto.Unmarshal(b, inMsg) return inMsg, err } @@ -235,11 +224,6 @@ func (ic *Conn) RemotePublicKey() ci.PubKey { return ic.remotePubKey } -// LocalPrivateKey returns the private key for the local peer. -func (ic *Conn) LocalPrivateKey() ci.PrivKey { - return ic.localPrivKey -} - // ConnState returns the security connection's state information. func (ic *Conn) ConnState() network.ConnectionState { return network.ConnectionState{} diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile deleted file mode 100644 index 4fb825a4..00000000 --- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(GOPATH)/src:../../../crypto/pb:. --gogofaster_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go index cd8719d1..16b910b4 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.pb.go @@ -1,383 +1,156 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plaintext.proto +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/plaintext.proto -package plaintext_pb +package pb import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - proto "github.com/gogo/protobuf/proto" pb "github.com/libp2p/go-libp2p/core/crypto/pb" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Exchange struct { - Id []byte `protobuf:"bytes,1,opt,name=id" json:"id"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` Pubkey *pb.PublicKey `protobuf:"bytes,2,opt,name=pubkey" json:"pubkey,omitempty"` } -func (m *Exchange) Reset() { *m = Exchange{} } -func (m *Exchange) String() string { return proto.CompactTextString(m) } -func (*Exchange) ProtoMessage() {} -func (*Exchange) Descriptor() ([]byte, []int) { - return fileDescriptor_aba144f73931b711, []int{0} +func (x *Exchange) Reset() { + *x = Exchange{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_plaintext_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exchange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *Exchange) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exchange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Exchange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (*Exchange) ProtoMessage() {} + +func (x *Exchange) ProtoReflect() protoreflect.Message { + mi := &file_pb_plaintext_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *Exchange) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exchange.Merge(m, src) -} -func (m *Exchange) XXX_Size() int { - return m.Size() -} -func (m *Exchange) XXX_DiscardUnknown() { - xxx_messageInfo_Exchange.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_Exchange proto.InternalMessageInfo +// Deprecated: Use 
Exchange.ProtoReflect.Descriptor instead. +func (*Exchange) Descriptor() ([]byte, []int) { + return file_pb_plaintext_proto_rawDescGZIP(), []int{0} +} -func (m *Exchange) GetId() []byte { - if m != nil { - return m.Id +func (x *Exchange) GetId() []byte { + if x != nil { + return x.Id } return nil } -func (m *Exchange) GetPubkey() *pb.PublicKey { - if m != nil { - return m.Pubkey +func (x *Exchange) GetPubkey() *pb.PublicKey { + if x != nil { + return x.Pubkey } return nil } -func init() { - proto.RegisterType((*Exchange)(nil), "plaintext.pb.Exchange") -} - -func init() { proto.RegisterFile("plaintext.proto", fileDescriptor_aba144f73931b711) } - -var fileDescriptor_aba144f73931b711 = []byte{ - // 187 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc8, 0x49, 0xcc, - 0xcc, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x12, 0x48, - 0x92, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0xc9, 0x4c, - 0x2a, 0x30, 0x2a, 0xd0, 0x4f, 0xcf, 0xd7, 0x85, 0xb0, 0x74, 0x93, 0xf3, 0x8b, 0x52, 0xf5, 0x93, - 0x8b, 0x2a, 0x0b, 0x4a, 0xf2, 0xf5, 0x0b, 0x92, 0xa0, 0x2c, 0x88, 0x31, 0x4a, 0x7e, 0x5c, 0x1c, - 0xae, 0x15, 0xc9, 0x19, 0x89, 0x79, 0xe9, 0xa9, 0x42, 0x22, 0x5c, 0x4c, 0x99, 0x29, 0x12, 0x8c, - 0x0a, 0x8c, 0x1a, 0x3c, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x31, 0x65, 0xa6, 0x08, 0xe9, - 0x70, 0xb1, 0x15, 0x94, 0x26, 0x65, 0xa7, 0x56, 0x4a, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x1b, 0x89, - 0xe8, 0xc1, 0x0c, 0x48, 0xd2, 0x0b, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xf6, 0x4e, 0xad, 0x0c, 0x82, - 0xaa, 0x71, 0x92, 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, - 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x06, 0x40, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x40, 0xde, 0x90, 0x0b, 0xc2, 0x00, 0x00, 0x00, -} - -func (m *Exchange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, 
size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +var File_pb_plaintext_proto protoreflect.FileDescriptor -func (m *Exchange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var file_pb_plaintext_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x62, 0x2f, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, + 0x70, 0x62, 0x1a, 0x1b, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x48, 0x0a, 0x08, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x70, + 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x6f, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, } -func (m *Exchange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pubkey != nil { - { - size, err := m.Pubkey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPlaintext(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Id != nil { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintPlaintext(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +var ( + file_pb_plaintext_proto_rawDescOnce sync.Once + file_pb_plaintext_proto_rawDescData = file_pb_plaintext_proto_rawDesc +) -func encodeVarintPlaintext(dAtA []byte, offset int, v uint64) int { - offset -= sovPlaintext(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - 
offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Exchange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != nil { - l = len(m.Id) - n += 1 + l + sovPlaintext(uint64(l)) - } - if m.Pubkey != nil { - l = m.Pubkey.Size() - n += 1 + l + sovPlaintext(uint64(l)) - } - return n +func file_pb_plaintext_proto_rawDescGZIP() []byte { + file_pb_plaintext_proto_rawDescOnce.Do(func() { + file_pb_plaintext_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_plaintext_proto_rawDescData) + }) + return file_pb_plaintext_proto_rawDescData } -func sovPlaintext(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +var file_pb_plaintext_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pb_plaintext_proto_goTypes = []interface{}{ + (*Exchange)(nil), // 0: plaintext.pb.Exchange + (*pb.PublicKey)(nil), // 1: crypto.pb.PublicKey } -func sozPlaintext(x uint64) (n int) { - return sovPlaintext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +var file_pb_plaintext_proto_depIdxs = []int32{ + 1, // 0: plaintext.pb.Exchange.pubkey:type_name -> crypto.pb.PublicKey + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } -func (m *Exchange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlaintext - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Exchange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Exchange: illegal tag %d (wire type 
%d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlaintext - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPlaintext - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPlaintext - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) - if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlaintext - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlaintext - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPlaintext - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pubkey == nil { - m.Pubkey = &pb.PublicKey{} - } - if err := m.Pubkey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlaintext(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlaintext - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthPlaintext - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func init() { file_pb_plaintext_proto_init() } +func file_pb_plaintext_proto_init() { + if File_pb_plaintext_proto != nil { + return } - return nil -} -func 
skipPlaintext(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlaintext - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlaintext - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlaintext - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlaintext + if !protoimpl.UnsafeEnabled { + file_pb_plaintext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exchange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlaintext - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlaintext - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_plaintext_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_plaintext_proto_goTypes, + DependencyIndexes: file_pb_plaintext_proto_depIdxs, + 
MessageInfos: file_pb_plaintext_proto_msgTypes, + }.Build() + File_pb_plaintext_proto = out.File + file_pb_plaintext_proto_rawDesc = nil + file_pb_plaintext_proto_goTypes = nil + file_pb_plaintext_proto_depIdxs = nil } - -var ( - ErrInvalidLengthPlaintext = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlaintext = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlaintext = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto index 0e792b3c..634100bd 100644 --- a/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto +++ b/vendor/github.com/libp2p/go-libp2p/core/sec/insecure/pb/plaintext.proto @@ -2,7 +2,7 @@ syntax = "proto2"; package plaintext.pb; -import "github.com/libp2p/go-libp2p/core/crypto/pb/crypto.proto"; +import "core/crypto/pb/crypto.proto"; message Exchange { optional bytes id = 1; diff --git a/vendor/github.com/libp2p/go-libp2p/defaults.go b/vendor/github.com/libp2p/go-libp2p/defaults.go index bf0380eb..c0ed6698 100644 --- a/vendor/github.com/libp2p/go-libp2p/defaults.go +++ b/vendor/github.com/libp2p/go-libp2p/defaults.go @@ -15,6 +15,8 @@ import ( quic "github.com/libp2p/go-libp2p/p2p/transport/quic" "github.com/libp2p/go-libp2p/p2p/transport/tcp" ws "github.com/libp2p/go-libp2p/p2p/transport/websocket" + webtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport" + "github.com/prometheus/client_golang/prometheus" "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" @@ -33,7 +35,7 @@ var DefaultSecurity = ChainOptions( // // Use this option when you want to *extend* the set of multiplexers used by // libp2p instead of replacing them. 
-var DefaultMuxers = Muxer("/yamux/1.0.0", yamux.DefaultTransport) +var DefaultMuxers = Muxer(yamux.ID, yamux.DefaultTransport) // DefaultTransports are the default libp2p transports. // @@ -43,6 +45,7 @@ var DefaultTransports = ChainOptions( Transport(tcp.NewTCPTransport), Transport(quic.NewTransport), Transport(ws.New), + Transport(webtransport.New), ) // DefaultPrivateTransports are the default libp2p transports when a PSK is supplied. @@ -78,9 +81,11 @@ var DefaultListenAddrs = func(cfg *Config) error { "/ip4/0.0.0.0/tcp/0", "/ip4/0.0.0.0/udp/0/quic", "/ip4/0.0.0.0/udp/0/quic-v1", + "/ip4/0.0.0.0/udp/0/quic-v1/webtransport", "/ip6/::/tcp/0", "/ip6/::/udp/0/quic", "/ip6/::/udp/0/quic-v1", + "/ip6/::/udp/0/quic-v1/webtransport", } listenAddrs := make([]multiaddr.Multiaddr, 0, len(addrs)) for _, s := range addrs { @@ -125,6 +130,11 @@ var DefaultMultiaddrResolver = func(cfg *Config) error { return cfg.Apply(MultiaddrResolver(madns.DefaultResolver)) } +// DefaultPrometheusRegisterer configures libp2p to use the default registerer +var DefaultPrometheusRegisterer = func(cfg *Config) error { + return cfg.Apply(PrometheusRegisterer(prometheus.DefaultRegisterer)) +} + // Complete list of default options and when to fallback on them. // // Please *DON'T* specify default options any other way. Putting this all here @@ -177,6 +187,10 @@ var defaults = []struct { fallback: func(cfg *Config) bool { return cfg.MultiaddrResolver == nil }, opt: DefaultMultiaddrResolver, }, + { + fallback: func(cfg *Config) bool { return !cfg.DisableMetrics && cfg.PrometheusRegisterer == nil }, + opt: DefaultPrometheusRegisterer, + }, } // Defaults configures libp2p to use the default options. 
Can be combined with diff --git a/vendor/github.com/libp2p/go-libp2p/libp2p.go b/vendor/github.com/libp2p/go-libp2p/libp2p.go index d77c3b9b..db23253b 100644 --- a/vendor/github.com/libp2p/go-libp2p/libp2p.go +++ b/vendor/github.com/libp2p/go-libp2p/libp2p.go @@ -37,8 +37,7 @@ func ChainOptions(opts ...Option) Option { // transport protocols; // // - If no multiplexer configuration is provided, the node is configured by -// default to use the "yamux/1.0.0" and "mplux/6.7.0" stream connection -// multiplexers; +// default to use yamux; // // - If no security transport is provided, the host uses the go-libp2p's noise // and/or tls encrypted transport to encrypt all traffic; diff --git a/vendor/github.com/libp2p/go-libp2p/limits.go b/vendor/github.com/libp2p/go-libp2p/limits.go index cf81fa76..5871577e 100644 --- a/vendor/github.com/libp2p/go-libp2p/limits.go +++ b/vendor/github.com/libp2p/go-libp2p/limits.go @@ -4,7 +4,6 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/p2p/host/autonat" rcmgr "github.com/libp2p/go-libp2p/p2p/host/resource-manager" - relayv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay" circuit "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto" relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" "github.com/libp2p/go-libp2p/p2p/protocol/holepunch" @@ -25,7 +24,7 @@ func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) { rcmgr.BaseLimit{StreamsInbound: 16, StreamsOutbound: 16, Streams: 32, Memory: 1 << 20}, rcmgr.BaseLimitIncrease{}, ) - for _, id := range [...]protocol.ID{identify.ID, identify.IDDelta, identify.IDPush} { + for _, id := range [...]protocol.ID{identify.ID, identify.IDPush} { config.AddProtocolLimit( id, rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 128, Memory: 4 << 20}, @@ -76,18 +75,6 @@ func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) { rcmgr.BaseLimitIncrease{}, ) - // relay/v1 - config.AddServiceLimit( - 
relayv1.ServiceName, - rcmgr.BaseLimit{StreamsInbound: 256, StreamsOutbound: 256, Streams: 256, Memory: 16 << 20}, - rcmgr.BaseLimitIncrease{StreamsInbound: 256, StreamsOutbound: 256, Streams: 256, Memory: 16 << 20}, - ) - config.AddServicePeerLimit( - relayv1.ServiceName, - rcmgr.BaseLimit{StreamsInbound: 64, StreamsOutbound: 64, Streams: 64, Memory: 1 << 20}, - rcmgr.BaseLimitIncrease{}, - ) - // relay/v2 config.AddServiceLimit( relayv2.ServiceName, @@ -101,7 +88,7 @@ func SetDefaultServiceLimits(config *rcmgr.ScalingLimitConfig) { ) // circuit protocols, both client and service - for _, proto := range [...]protocol.ID{circuit.ProtoIDv1, circuit.ProtoIDv2Hop, circuit.ProtoIDv2Stop} { + for _, proto := range [...]protocol.ID{circuit.ProtoIDv2Hop, circuit.ProtoIDv2Stop} { config.AddProtocolLimit( proto, rcmgr.BaseLimit{StreamsInbound: 640, StreamsOutbound: 640, Streams: 640, Memory: 16 << 20}, diff --git a/vendor/github.com/libp2p/go-libp2p/options.go b/vendor/github.com/libp2p/go-libp2p/options.go index 381706f7..1809ec44 100644 --- a/vendor/github.com/libp2p/go-libp2p/options.go +++ b/vendor/github.com/libp2p/go-libp2p/options.go @@ -16,6 +16,7 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/metrics" "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/pnet" "github.com/libp2p/go-libp2p/core/protocol" @@ -26,6 +27,7 @@ import ( relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" "github.com/libp2p/go-libp2p/p2p/protocol/holepunch" "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + "github.com/prometheus/client_golang/prometheus" ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" @@ -307,6 +309,8 @@ func EnableRelayService(opts ...relayv2.Option) Option { // // This subsystem performs automatic address rewriting to advertise relay addresses when it // 
detects that the node is publicly unreachable (e.g. behind a NAT). +// +// Deprecated: Use EnableAutoRelayWithStaticRelays or EnableAutoRelayWithPeerSource func EnableAutoRelay(opts ...autorelay.Option) Option { return func(cfg *Config) error { cfg.EnableAutoRelay = true @@ -315,6 +319,31 @@ func EnableAutoRelay(opts ...autorelay.Option) Option { } } +// EnableAutoRelayWithStaticRelays configures libp2p to enable the AutoRelay subsystem using +// the provided relays as relay candidates. +// This subsystem performs automatic address rewriting to advertise relay addresses when it +// detects that the node is publicly unreachable (e.g. behind a NAT). +func EnableAutoRelayWithStaticRelays(static []peer.AddrInfo, opts ...autorelay.Option) Option { + return func(cfg *Config) error { + cfg.EnableAutoRelay = true + cfg.AutoRelayOpts = append([]autorelay.Option{autorelay.WithStaticRelays(static)}, opts...) + return nil + } +} + +// EnableAutoRelayWithPeerSource configures libp2p to enable the AutoRelay +// subsystem using the provided PeerSource callback to get more relay +// candidates. This subsystem performs automatic address rewriting to advertise +// relay addresses when it detects that the node is publicly unreachable (e.g. +// behind a NAT). +func EnableAutoRelayWithPeerSource(peerSource autorelay.PeerSource, opts ...autorelay.Option) Option { + return func(cfg *Config) error { + cfg.EnableAutoRelay = true + cfg.AutoRelayOpts = append([]autorelay.Option{autorelay.WithPeerSource(peerSource)}, opts...) + return nil + } +} + // ForceReachabilityPublic overrides automatic reachability detection in the AutoNAT subsystem, // forcing the local node to believe it is reachable externally. 
func ForceReachabilityPublic() Option { @@ -520,3 +549,28 @@ func WithDialTimeout(t time.Duration) Option { return nil } } + +// DisableMetrics configures libp2p to disable prometheus metrics +func DisableMetrics() Option { + return func(cfg *Config) error { + cfg.DisableMetrics = true + return nil + } +} + +// PrometheusRegisterer configures libp2p to use reg as the Registerer for all metrics subsystems +func PrometheusRegisterer(reg prometheus.Registerer) Option { + return func(cfg *Config) error { + if cfg.DisableMetrics { + return errors.New("cannot set registerer when metrics are disabled") + } + if cfg.PrometheusRegisterer != nil { + return errors.New("registerer already set") + } + if reg == nil { + return errors.New("registerer cannot be nil") + } + cfg.PrometheusRegisterer = reg + return nil + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go index 0fd8b0ec..fc8c6763 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/autonat.go @@ -2,7 +2,6 @@ package autonat import ( "context" - "errors" "math/rand" "sync/atomic" "time" @@ -20,6 +19,8 @@ import ( var log = logging.Logger("autonat") +const maxConfidence = 3 + // AmbientAutoNAT is the implementation of ambient NAT autodiscovery type AmbientAutoNAT struct { host host.Host @@ -30,10 +31,10 @@ type AmbientAutoNAT struct { ctxCancel context.CancelFunc // is closed when Close is called backgroundRunning chan struct{} // is closed when the background go routine exits - inboundConn chan network.Conn - observations chan autoNATResult + inboundConn chan network.Conn + dialResponses chan error // status is an autoNATResult reflecting current status. - status atomic.Value + status atomic.Pointer[network.Reachability] // Reflects the confidence on of the NATStatus being private, as a single // dialback may fail for reasons unrelated to NAT. 
// If it is <3, then multiple autoNAT peers may be contacted for dialback @@ -58,11 +59,6 @@ type StaticAutoNAT struct { service *autoNATService } -type autoNATResult struct { - network.Reachability - address ma.Multiaddr -} - // New creates a new NAT autodiscovery system attached to a host func New(h host.Host, options ...Option) (AutoNAT, error) { var err error @@ -111,15 +107,19 @@ func New(h host.Host, options ...Option) (AutoNAT, error) { host: h, config: conf, inboundConn: make(chan network.Conn, 5), - observations: make(chan autoNATResult, 1), + dialResponses: make(chan error, 1), emitReachabilityChanged: emitReachabilityChanged, service: service, recentProbes: make(map[peer.ID]time.Time), } - as.status.Store(autoNATResult{network.ReachabilityUnknown, nil}) + reachability := network.ReachabilityUnknown + as.status.Store(&reachability) - subscriber, err := as.host.EventBus().Subscribe([]interface{}{new(event.EvtLocalAddressesUpdated), new(event.EvtPeerIdentificationCompleted)}) + subscriber, err := as.host.EventBus().Subscribe( + []any{new(event.EvtLocalAddressesUpdated), new(event.EvtPeerIdentificationCompleted)}, + eventbus.Name("autonat"), + ) if err != nil { return nil, err } @@ -133,23 +133,16 @@ func New(h host.Host, options ...Option) (AutoNAT, error) { // Status returns the AutoNAT observed reachability status. func (as *AmbientAutoNAT) Status() network.Reachability { - s := as.status.Load().(autoNATResult) - return s.Reachability + s := as.status.Load() + return *s } func (as *AmbientAutoNAT) emitStatus() { - status := as.status.Load().(autoNATResult) - as.emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: status.Reachability}) -} - -// PublicAddr returns the publicly connectable Multiaddr of this node if one is known. 
-func (as *AmbientAutoNAT) PublicAddr() (ma.Multiaddr, error) { - s := as.status.Load().(autoNATResult) - if s.Reachability != network.ReachabilityPublic { - return nil, errors.New("NAT status is not public") + status := *as.status.Load() + as.emitReachabilityChanged.Emit(event.EvtLocalReachabilityChanged{Reachability: status}) + if as.metricsTracer != nil { + as.metricsTracer.ReachabilityStatus(status) } - - return s.address, nil } func ipInList(candidate ma.Multiaddr, list []ma.Multiaddr) bool { @@ -168,7 +161,6 @@ func (as *AmbientAutoNAT) background() { // before starting autodetection delay := as.config.bootDelay - var lastAddrUpdated time.Time subChan := as.subscriber.Out() defer as.subscriber.Close() defer as.emitReachabilityChanged.Close() @@ -176,15 +168,12 @@ func (as *AmbientAutoNAT) background() { timer := time.NewTimer(delay) defer timer.Stop() timerRunning := true + retryProbe := false for { select { // new inbound connection. case conn := <-as.inboundConn: localAddrs := as.host.Addrs() - ca := as.status.Load().(autoNATResult) - if ca.address != nil { - localAddrs = append(localAddrs, ca.address) - } if manet.IsPublicAddr(conn.RemoteMultiaddr()) && !ipInList(conn.RemoteMultiaddr(), localAddrs) { as.lastInbound = time.Now() @@ -193,16 +182,15 @@ func (as *AmbientAutoNAT) background() { case e := <-subChan: switch e := e.(type) { case event.EvtLocalAddressesUpdated: - if !lastAddrUpdated.Add(time.Second).After(time.Now()) { - lastAddrUpdated = time.Now() - if as.confidence > 1 { - as.confidence-- - } + // On local address update, reduce confidence from maximum so that we schedule + // the next probe sooner + if as.confidence == maxConfidence { + as.confidence-- } case event.EvtPeerIdentificationCompleted: if s, err := as.host.Peerstore().SupportsProtocols(e.Peer, AutoNATProto); err == nil && len(s) > 0 { - currentStatus := as.status.Load().(autoNATResult) - if currentStatus.Reachability == network.ReachabilityUnknown { + currentStatus := 
*as.status.Load() + if currentStatus == network.ReachabilityUnknown { as.tryProbe(e.Peer) } } @@ -211,15 +199,20 @@ func (as *AmbientAutoNAT) background() { } // probe finished. - case result, ok := <-as.observations: + case err, ok := <-as.dialResponses: if !ok { return } - as.recordObservation(result) + if IsDialRefused(err) { + retryProbe = true + } else { + as.handleDialResponse(err) + } case <-timer.C: peer := as.getPeerToProbe() as.tryProbe(peer) timerRunning = false + retryProbe = false case <-as.ctx.Done(): return } @@ -228,7 +221,7 @@ func (as *AmbientAutoNAT) background() { if timerRunning && !timer.Stop() { <-timer.C } - timer.Reset(as.scheduleProbe()) + timer.Reset(as.scheduleProbe(retryProbe)) timerRunning = true } } @@ -243,14 +236,15 @@ func (as *AmbientAutoNAT) cleanupRecentProbes() { } // scheduleProbe calculates when the next probe should be scheduled for. -func (as *AmbientAutoNAT) scheduleProbe() time.Duration { +func (as *AmbientAutoNAT) scheduleProbe(retryProbe bool) time.Duration { // Our baseline is a probe every 'AutoNATRefreshInterval' // This is modulated by: - // * if we are in an unknown state, or have low confidence, that should drop to 'AutoNATRetryInterval' + // * if we are in an unknown state, have low confidence, or we want to retry because a probe was refused that + // should drop to 'AutoNATRetryInterval' // * recent inbound connections (implying continued connectivity) should decrease the retry when public // * recent inbound connections when not public mean we should try more actively to see if we're public. fixedNow := time.Now() - currentStatus := as.status.Load().(autoNATResult) + currentStatus := *as.status.Load() nextProbe := fixedNow // Don't look for peers in the peer store more than once per second. 
@@ -262,13 +256,15 @@ func (as *AmbientAutoNAT) scheduleProbe() time.Duration { } if !as.lastProbe.IsZero() { untilNext := as.config.refreshInterval - if currentStatus.Reachability == network.ReachabilityUnknown { + if retryProbe { untilNext = as.config.retryInterval - } else if as.confidence < 3 { + } else if currentStatus == network.ReachabilityUnknown { untilNext = as.config.retryInterval - } else if currentStatus.Reachability == network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) { + } else if as.confidence < maxConfidence { + untilNext = as.config.retryInterval + } else if currentStatus == network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) { untilNext *= 2 - } else if currentStatus.Reachability != network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) { + } else if currentStatus != network.ReachabilityPublic && as.lastInbound.After(as.lastProbe) { untilNext /= 5 } @@ -276,72 +272,86 @@ func (as *AmbientAutoNAT) scheduleProbe() time.Duration { nextProbe = as.lastProbe.Add(untilNext) } } - + if as.metricsTracer != nil { + as.metricsTracer.NextProbeTime(nextProbe) + } return nextProbe.Sub(fixedNow) } -// Update the current status based on an observed result. -func (as *AmbientAutoNAT) recordObservation(observation autoNATResult) { - currentStatus := as.status.Load().(autoNATResult) - if observation.Reachability == network.ReachabilityPublic { - log.Debugf("NAT status is public") +// handleDialResponse updates the current status based on dial response. 
+func (as *AmbientAutoNAT) handleDialResponse(dialErr error) { + var observation network.Reachability + switch { + case dialErr == nil: + observation = network.ReachabilityPublic + case IsDialError(dialErr): + observation = network.ReachabilityPrivate + default: + observation = network.ReachabilityUnknown + } + + as.recordObservation(observation) +} + +// recordObservation updates NAT status and confidence +func (as *AmbientAutoNAT) recordObservation(observation network.Reachability) { + + currentStatus := *as.status.Load() + + if observation == network.ReachabilityPublic { changed := false - if currentStatus.Reachability != network.ReachabilityPublic { + if currentStatus != network.ReachabilityPublic { + // Aggressively switch to public from other states ignoring confidence + log.Debugf("NAT status is public") + // we are flipping our NATStatus, so confidence drops to 0 as.confidence = 0 if as.service != nil { as.service.Enable() } changed = true - } else if as.confidence < 3 { + } else if as.confidence < maxConfidence { as.confidence++ } - if observation.address != nil { - if !changed && currentStatus.address != nil && !observation.address.Equal(currentStatus.address) { - as.confidence-- - } - if currentStatus.address == nil || !observation.address.Equal(currentStatus.address) { - changed = true - } - as.status.Store(observation) - } - if observation.address != nil && changed { + as.status.Store(&observation) + if changed { as.emitStatus() } - } else if observation.Reachability == network.ReachabilityPrivate { - log.Debugf("NAT status is private") - if currentStatus.Reachability == network.ReachabilityPublic { + } else if observation == network.ReachabilityPrivate { + if currentStatus != network.ReachabilityPrivate { if as.confidence > 0 { as.confidence-- } else { + log.Debugf("NAT status is private") + // we are flipping our NATStatus, so confidence drops to 0 as.confidence = 0 - as.status.Store(observation) + as.status.Store(&observation) if as.service != nil { 
as.service.Disable() } as.emitStatus() } - } else if as.confidence < 3 { + } else if as.confidence < maxConfidence { as.confidence++ - as.status.Store(observation) - if currentStatus.Reachability != network.ReachabilityPrivate { - as.emitStatus() - } + as.status.Store(&observation) } } else if as.confidence > 0 { // don't just flip to unknown, reduce confidence first as.confidence-- } else { log.Debugf("NAT status is unknown") - as.status.Store(autoNATResult{network.ReachabilityUnknown, nil}) - if currentStatus.Reachability != network.ReachabilityUnknown { + as.status.Store(&observation) + if currentStatus != network.ReachabilityUnknown { if as.service != nil { as.service.Enable() } as.emitStatus() } } + if as.metricsTracer != nil { + as.metricsTracer.ReachabilityStatusConfidence(as.confidence) + } } func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool { @@ -369,27 +379,15 @@ func (as *AmbientAutoNAT) tryProbe(p peer.ID) bool { } func (as *AmbientAutoNAT) probe(pi *peer.AddrInfo) { - cli := NewAutoNATClient(as.host, as.config.addressFunc) + cli := NewAutoNATClient(as.host, as.config.addressFunc, as.metricsTracer) ctx, cancel := context.WithTimeout(as.ctx, as.config.requestTimeout) defer cancel() - a, err := cli.DialBack(ctx, pi.ID) - - var result autoNATResult - switch { - case err == nil: - log.Debugf("Dialback through %s successful; public address is %s", pi.ID.Pretty(), a.String()) - result.Reachability = network.ReachabilityPublic - result.address = a - case IsDialError(err): - log.Debugf("Dialback through %s failed", pi.ID.Pretty()) - result.Reachability = network.ReachabilityPrivate - default: - result.Reachability = network.ReachabilityUnknown - } + err := cli.DialBack(ctx, pi.ID) + log.Debugf("Dialback through peer %s completed: err: %s", pi.ID, err) select { - case as.observations <- result: + case as.dialResponses <- err: case <-as.ctx.Done(): return } @@ -427,8 +425,7 @@ func (as *AmbientAutoNAT) getPeerToProbe() peer.ID { return "" } - 
shufflePeers(candidates) - return candidates[0] + return candidates[rand.Intn(len(candidates))] } func (as *AmbientAutoNAT) Close() error { @@ -440,26 +437,11 @@ func (as *AmbientAutoNAT) Close() error { return nil } -func shufflePeers(peers []peer.ID) { - for i := range peers { - j := rand.Intn(i + 1) - peers[i], peers[j] = peers[j], peers[i] - } -} - // Status returns the AutoNAT observed reachability status. func (s *StaticAutoNAT) Status() network.Reachability { return s.reachability } -// PublicAddr returns the publicly connectable Multiaddr of this node if one is known. -func (s *StaticAutoNAT) PublicAddr() (ma.Multiaddr, error) { - if s.reachability != network.ReachabilityPublic { - return nil, errors.New("NAT status is not public") - } - return nil, errors.New("no available address") -} - func (s *StaticAutoNAT) Close() error { if s.service != nil { s.service.Disable() diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go index 3edbeb50..fa0e03bc 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/client.go @@ -8,24 +8,24 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - pb "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" + "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" - "github.com/libp2p/go-msgio/protoio" - ma "github.com/multiformats/go-multiaddr" + "github.com/libp2p/go-msgio/pbio" ) // NewAutoNATClient creates a fresh instance of an AutoNATClient // If addrFunc is nil, h.Addrs will be used -func NewAutoNATClient(h host.Host, addrFunc AddrFunc) Client { +func NewAutoNATClient(h host.Host, addrFunc AddrFunc, mt MetricsTracer) Client { if addrFunc == nil { addrFunc = h.Addrs } - return &client{h: h, addrFunc: addrFunc} + return &client{h: h, addrFunc: addrFunc, mt: mt} } type client struct { h 
host.Host addrFunc AddrFunc + mt MetricsTracer } // DialBack asks peer p to dial us back on all addresses returned by the addrFunc. @@ -34,22 +34,22 @@ type client struct { // Note: A returned error Message_E_DIAL_ERROR does not imply that the server // actually performed a dial attempt. Servers that run a version < v0.20.0 also // return Message_E_DIAL_ERROR if the dial was skipped due to the dialPolicy. -func (c *client) DialBack(ctx context.Context, p peer.ID) (ma.Multiaddr, error) { +func (c *client) DialBack(ctx context.Context, p peer.ID) error { s, err := c.h.NewStream(ctx, p, AutoNATProto) if err != nil { - return nil, err + return err } if err := s.Scope().SetService(ServiceName); err != nil { log.Debugf("error attaching stream to autonat service: %s", err) s.Reset() - return nil, err + return err } if err := s.Scope().ReserveMemory(maxMsgSize, network.ReservationPriorityAlways); err != nil { log.Debugf("error reserving memory for autonat stream: %s", err) s.Reset() - return nil, err + return err } defer s.Scope().ReleaseMemory(maxMsgSize) @@ -58,32 +58,34 @@ func (c *client) DialBack(ctx context.Context, p peer.ID) (ma.Multiaddr, error) // don't care about being nice. 
defer s.Close() - r := protoio.NewDelimitedReader(s, maxMsgSize) - w := protoio.NewDelimitedWriter(s) + r := pbio.NewDelimitedReader(s, maxMsgSize) + w := pbio.NewDelimitedWriter(s) req := newDialMessage(peer.AddrInfo{ID: c.h.ID(), Addrs: c.addrFunc()}) if err := w.WriteMsg(req); err != nil { s.Reset() - return nil, err + return err } var res pb.Message if err := r.ReadMsg(&res); err != nil { s.Reset() - return nil, err + return err } if res.GetType() != pb.Message_DIAL_RESPONSE { s.Reset() - return nil, fmt.Errorf("unexpected response: %s", res.GetType().String()) + return fmt.Errorf("unexpected response: %s", res.GetType().String()) } status := res.GetDialResponse().GetStatus() + if c.mt != nil { + c.mt.ReceivedDialResponse(status) + } switch status { case pb.Message_OK: - addr := res.GetDialResponse().GetAddr() - return ma.NewMultiaddrBytes(addr) + return nil default: - return nil, Error{Status: status, Text: res.GetDialResponse().GetStatusText()} + return Error{Status: status, Text: res.GetDialResponse().GetStatusText()} } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go index f4c89bea..9bf3bfe5 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/interface.go @@ -14,9 +14,6 @@ import ( type AutoNAT interface { // Status returns the current NAT status Status() network.Reachability - // PublicAddr returns the public dial address when NAT status is public and an - // error otherwise - PublicAddr() (ma.Multiaddr, error) io.Closer } @@ -24,7 +21,7 @@ type AutoNAT interface { type Client interface { // DialBack requests from a peer providing AutoNAT services to test dial back // and report the address on a successful connection. 
- DialBack(ctx context.Context, p peer.ID) (ma.Multiaddr, error) + DialBack(ctx context.Context, p peer.ID) error } // AddrFunc is a function returning the candidate addresses for the local host. diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go new file mode 100644 index 00000000..4207d4e7 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/metrics.go @@ -0,0 +1,162 @@ +package autonat + +import ( + "time" + + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" + "github.com/libp2p/go-libp2p/p2p/metricshelper" + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "libp2p_autonat" + +var ( + reachabilityStatus = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "reachability_status", + Help: "Current node reachability", + }, + ) + reachabilityStatusConfidence = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "reachability_status_confidence", + Help: "Node reachability status confidence", + }, + ) + receivedDialResponseTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "received_dial_response_total", + Help: "Count of dial responses for client", + }, + []string{"response_status"}, + ) + outgoingDialResponseTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "outgoing_dial_response_total", + Help: "Count of dial responses for server", + }, + []string{"response_status"}, + ) + outgoingDialRefusedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "outgoing_dial_refused_total", + Help: "Count of dial requests refused by server", + }, + []string{"refusal_reason"}, + ) + nextProbeTimestamp = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: 
"next_probe_timestamp", + Help: "Time of next probe", + }, + ) + collectors = []prometheus.Collector{ + reachabilityStatus, + reachabilityStatusConfidence, + receivedDialResponseTotal, + outgoingDialResponseTotal, + outgoingDialRefusedTotal, + nextProbeTimestamp, + } +) + +type MetricsTracer interface { + ReachabilityStatus(status network.Reachability) + ReachabilityStatusConfidence(confidence int) + ReceivedDialResponse(status pb.Message_ResponseStatus) + OutgoingDialResponse(status pb.Message_ResponseStatus) + OutgoingDialRefused(reason string) + NextProbeTime(t time.Time) +} + +func getResponseStatus(status pb.Message_ResponseStatus) string { + var s string + switch status { + case pb.Message_OK: + s = "ok" + case pb.Message_E_DIAL_ERROR: + s = "dial error" + case pb.Message_E_DIAL_REFUSED: + s = "dial refused" + case pb.Message_E_BAD_REQUEST: + s = "bad request" + case pb.Message_E_INTERNAL_ERROR: + s = "internal error" + default: + s = "unknown" + } + return s +} + +const ( + rate_limited = "rate limited" + dial_blocked = "dial blocked" + no_valid_address = "no valid address" +) + +type metricsTracer struct{} + +var _ MetricsTracer = &metricsTracer{} + +type metricsTracerSetting struct { + reg prometheus.Registerer +} + +type MetricsTracerOption func(*metricsTracerSetting) + +func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption { + return func(s *metricsTracerSetting) { + if reg != nil { + s.reg = reg + } + } +} + +func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { + setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer} + for _, opt := range opts { + opt(setting) + } + metricshelper.RegisterCollectors(setting.reg, collectors...) 
+ return &metricsTracer{} +} + +func (mt *metricsTracer) ReachabilityStatus(status network.Reachability) { + reachabilityStatus.Set(float64(status)) +} + +func (mt *metricsTracer) ReachabilityStatusConfidence(confidence int) { + reachabilityStatusConfidence.Set(float64(confidence)) +} + +func (mt *metricsTracer) ReceivedDialResponse(status pb.Message_ResponseStatus) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, getResponseStatus(status)) + receivedDialResponseTotal.WithLabelValues(*tags...).Inc() +} + +func (mt *metricsTracer) OutgoingDialResponse(status pb.Message_ResponseStatus) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, getResponseStatus(status)) + outgoingDialResponseTotal.WithLabelValues(*tags...).Inc() +} + +func (mt *metricsTracer) OutgoingDialRefused(reason string) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, reason) + outgoingDialRefusedTotal.WithLabelValues(*tags...).Inc() +} + +func (mt *metricsTracer) NextProbeTime(t time.Time) { + nextProbeTimestamp.Set(float64(t.Unix())) +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go index 0935dc23..8e653f81 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/options.go @@ -17,6 +17,7 @@ type config struct { dialer network.Network forceReachability bool reachability network.Reachability + metricsTracer MetricsTracer // client bootDelay time.Duration @@ -142,3 +143,11 @@ func WithPeerThrottling(amount int) Option { return nil } } + +// WithMetricsTracer uses mt to track autonat metrics +func WithMetricsTracer(mt MetricsTracer) Option { + return func(c *config) error { + c.metricsTracer = mt + return nil + } +} diff --git 
a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile deleted file mode 100644 index dd21e878..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -pbgos := $(patsubst %.proto,%.pb.go,$(wildcard *.proto)) - -all: $(pbgos) - -%.pb.go: %.proto - protoc --gogofast_out=. --proto_path=$(GOPATH)/src:. $< diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go index a22b5e99..2764883f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/pb/autonat.pb.go @@ -1,26 +1,24 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: autonat.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/autonat.proto -package autonat_pb +package pb import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Message_MessageType int32 @@ -29,15 +27,17 @@ const ( Message_DIAL_RESPONSE Message_MessageType = 1 ) -var Message_MessageType_name = map[int32]string{ - 0: "DIAL", - 1: "DIAL_RESPONSE", -} - -var Message_MessageType_value = map[string]int32{ - "DIAL": 0, - "DIAL_RESPONSE": 1, -} +// Enum value maps for Message_MessageType. +var ( + Message_MessageType_name = map[int32]string{ + 0: "DIAL", + 1: "DIAL_RESPONSE", + } + Message_MessageType_value = map[string]int32{ + "DIAL": 0, + "DIAL_RESPONSE": 1, + } +) func (x Message_MessageType) Enum() *Message_MessageType { p := new(Message_MessageType) @@ -46,20 +46,34 @@ func (x Message_MessageType) Enum() *Message_MessageType { } func (x Message_MessageType) String() string { - return proto.EnumName(Message_MessageType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message_MessageType) Descriptor() protoreflect.EnumDescriptor { + return file_pb_autonat_proto_enumTypes[0].Descriptor() +} + +func (Message_MessageType) Type() protoreflect.EnumType { + return &file_pb_autonat_proto_enumTypes[0] +} + +func (x Message_MessageType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -func (x *Message_MessageType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Message_MessageType_value, data, "Message_MessageType") +// Deprecated: Do not use. +func (x *Message_MessageType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = Message_MessageType(value) + *x = Message_MessageType(num) return nil } +// Deprecated: Use Message_MessageType.Descriptor instead. 
func (Message_MessageType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a04e278ef61ac07a, []int{0, 0} + return file_pb_autonat_proto_rawDescGZIP(), []int{0, 0} } type Message_ResponseStatus int32 @@ -72,21 +86,23 @@ const ( Message_E_INTERNAL_ERROR Message_ResponseStatus = 300 ) -var Message_ResponseStatus_name = map[int32]string{ - 0: "OK", - 100: "E_DIAL_ERROR", - 101: "E_DIAL_REFUSED", - 200: "E_BAD_REQUEST", - 300: "E_INTERNAL_ERROR", -} - -var Message_ResponseStatus_value = map[string]int32{ - "OK": 0, - "E_DIAL_ERROR": 100, - "E_DIAL_REFUSED": 101, - "E_BAD_REQUEST": 200, - "E_INTERNAL_ERROR": 300, -} +// Enum value maps for Message_ResponseStatus. +var ( + Message_ResponseStatus_name = map[int32]string{ + 0: "OK", + 100: "E_DIAL_ERROR", + 101: "E_DIAL_REFUSED", + 200: "E_BAD_REQUEST", + 300: "E_INTERNAL_ERROR", + } + Message_ResponseStatus_value = map[string]int32{ + "OK": 0, + "E_DIAL_ERROR": 100, + "E_DIAL_REFUSED": 101, + "E_BAD_REQUEST": 200, + "E_INTERNAL_ERROR": 300, + } +) func (x Message_ResponseStatus) Enum() *Message_ResponseStatus { p := new(Message_ResponseStatus) @@ -95,1152 +111,414 @@ func (x Message_ResponseStatus) Enum() *Message_ResponseStatus { } func (x Message_ResponseStatus) String() string { - return proto.EnumName(Message_ResponseStatus_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message_ResponseStatus) Descriptor() protoreflect.EnumDescriptor { + return file_pb_autonat_proto_enumTypes[1].Descriptor() } -func (x *Message_ResponseStatus) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Message_ResponseStatus_value, data, "Message_ResponseStatus") +func (Message_ResponseStatus) Type() protoreflect.EnumType { + return &file_pb_autonat_proto_enumTypes[1] +} + +func (x Message_ResponseStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *Message_ResponseStatus) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = Message_ResponseStatus(value) + *x = Message_ResponseStatus(num) return nil } +// Deprecated: Use Message_ResponseStatus.Descriptor instead. func (Message_ResponseStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a04e278ef61ac07a, []int{0, 1} + return file_pb_autonat_proto_rawDescGZIP(), []int{0, 1} } type Message struct { - Type *Message_MessageType `protobuf:"varint,1,opt,name=type,enum=autonat.pb.Message_MessageType" json:"type,omitempty"` - Dial *Message_Dial `protobuf:"bytes,2,opt,name=dial" json:"dial,omitempty"` - DialResponse *Message_DialResponse `protobuf:"bytes,3,opt,name=dialResponse" json:"dialResponse,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_a04e278ef61ac07a, []int{0} -} -func (m *Message) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(m, src) -} -func (m *Message) XXX_Size() int { - return m.Size() + Type *Message_MessageType `protobuf:"varint,1,opt,name=type,enum=autonat.pb.Message_MessageType" json:"type,omitempty"` + Dial *Message_Dial `protobuf:"bytes,2,opt,name=dial" 
json:"dial,omitempty"` + DialResponse *Message_DialResponse `protobuf:"bytes,3,opt,name=dialResponse" json:"dialResponse,omitempty"` } -func (m *Message) XXX_DiscardUnknown() { - xxx_messageInfo_Message.DiscardUnknown(m) -} - -var xxx_messageInfo_Message proto.InternalMessageInfo -func (m *Message) GetType() Message_MessageType { - if m != nil && m.Type != nil { - return *m.Type +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonat_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return Message_DIAL } -func (m *Message) GetDial() *Message_Dial { - if m != nil { - return m.Dial - } - return nil +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Message) GetDialResponse() *Message_DialResponse { - if m != nil { - return m.DialResponse +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonat_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -type Message_PeerInfo struct { - Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_pb_autonat_proto_rawDescGZIP(), []int{0} } -func (m *Message_PeerInfo) Reset() { *m = Message_PeerInfo{} } -func (m *Message_PeerInfo) String() string { return proto.CompactTextString(m) } -func (*Message_PeerInfo) ProtoMessage() {} -func (*Message_PeerInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_a04e278ef61ac07a, []int{0, 0} -} -func (m *Message_PeerInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_PeerInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_PeerInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Message) GetType() Message_MessageType { + if x != nil && x.Type != nil { + return *x.Type } -} -func (m *Message_PeerInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_PeerInfo.Merge(m, src) -} -func (m *Message_PeerInfo) XXX_Size() int { - return m.Size() -} -func (m *Message_PeerInfo) XXX_DiscardUnknown() { - xxx_messageInfo_Message_PeerInfo.DiscardUnknown(m) + return Message_DIAL } -var xxx_messageInfo_Message_PeerInfo proto.InternalMessageInfo - -func (m *Message_PeerInfo) GetId() []byte { - if m != nil { - return m.Id +func (x *Message) GetDial() *Message_Dial { + if x != nil { + return x.Dial } return nil } -func (m *Message_PeerInfo) GetAddrs() [][]byte { - if m != nil { - return m.Addrs +func (x *Message) GetDialResponse() *Message_DialResponse { + if x != nil { + return x.DialResponse } return nil } -type Message_Dial struct { - Peer *Message_PeerInfo `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +type Message_PeerInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields -func (m *Message_Dial) Reset() { *m = Message_Dial{} } -func (m *Message_Dial) String() string { return proto.CompactTextString(m) } -func (*Message_Dial) ProtoMessage() {} -func (*Message_Dial) Descriptor() ([]byte, []int) { - return fileDescriptor_a04e278ef61ac07a, []int{0, 1} -} -func (m *Message_Dial) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_Dial) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_Dial.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Message_Dial) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_Dial.Merge(m, src) -} -func (m *Message_Dial) XXX_Size() int { - return m.Size() -} -func (m *Message_Dial) XXX_DiscardUnknown() { - xxx_messageInfo_Message_Dial.DiscardUnknown(m) + Id []byte `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` } -var xxx_messageInfo_Message_Dial proto.InternalMessageInfo - -func (m *Message_Dial) GetPeer() *Message_PeerInfo { - if m != nil { - return m.Peer +func (x *Message_PeerInfo) Reset() { + *x = Message_PeerInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonat_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type Message_DialResponse struct { - Status *Message_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=autonat.pb.Message_ResponseStatus" json:"status,omitempty"` - StatusText *string `protobuf:"bytes,2,opt,name=statusText" json:"statusText,omitempty"` - Addr []byte `protobuf:"bytes,3,opt,name=addr" json:"addr,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Message_PeerInfo) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *Message_DialResponse) Reset() { *m = Message_DialResponse{} } -func (m *Message_DialResponse) String() string { return proto.CompactTextString(m) } -func (*Message_DialResponse) ProtoMessage() {} -func (*Message_DialResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a04e278ef61ac07a, []int{0, 2} -} -func (m *Message_DialResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Message_DialResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Message_DialResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*Message_PeerInfo) ProtoMessage() {} + +func (x *Message_PeerInfo) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonat_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *Message_DialResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message_DialResponse.Merge(m, src) -} -func (m *Message_DialResponse) XXX_Size() int { - return m.Size() -} -func (m *Message_DialResponse) XXX_DiscardUnknown() { - xxx_messageInfo_Message_DialResponse.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_Message_DialResponse proto.InternalMessageInfo - -func (m *Message_DialResponse) GetStatus() Message_ResponseStatus { - if m != nil && m.Status != nil { - return *m.Status - } - return Message_OK +// Deprecated: Use Message_PeerInfo.ProtoReflect.Descriptor instead. 
+func (*Message_PeerInfo) Descriptor() ([]byte, []int) { + return file_pb_autonat_proto_rawDescGZIP(), []int{0, 0} } -func (m *Message_DialResponse) GetStatusText() string { - if m != nil && m.StatusText != nil { - return *m.StatusText +func (x *Message_PeerInfo) GetId() []byte { + if x != nil { + return x.Id } - return "" + return nil } -func (m *Message_DialResponse) GetAddr() []byte { - if m != nil { - return m.Addr +func (x *Message_PeerInfo) GetAddrs() [][]byte { + if x != nil { + return x.Addrs } return nil } -func init() { - proto.RegisterEnum("autonat.pb.Message_MessageType", Message_MessageType_name, Message_MessageType_value) - proto.RegisterEnum("autonat.pb.Message_ResponseStatus", Message_ResponseStatus_name, Message_ResponseStatus_value) - proto.RegisterType((*Message)(nil), "autonat.pb.Message") - proto.RegisterType((*Message_PeerInfo)(nil), "autonat.pb.Message.PeerInfo") - proto.RegisterType((*Message_Dial)(nil), "autonat.pb.Message.Dial") - proto.RegisterType((*Message_DialResponse)(nil), "autonat.pb.Message.DialResponse") -} +type Message_Dial struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func init() { proto.RegisterFile("autonat.proto", fileDescriptor_a04e278ef61ac07a) } - -var fileDescriptor_a04e278ef61ac07a = []byte{ - // 372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x90, 0xcf, 0x8a, 0xda, 0x50, - 0x14, 0xc6, 0xbd, 0x31, 0xb5, 0xf6, 0x18, 0xc3, 0xed, 0xa1, 0x85, 0x20, 0x25, 0x0d, 0x59, 0x49, - 0x29, 0x22, 0x76, 0x53, 0xba, 0x53, 0x72, 0x0b, 0xd2, 0x56, 0xed, 0x49, 0x5c, 0x87, 0x94, 0xdc, - 0x0e, 0x01, 0x31, 0x21, 0x89, 0x30, 0x6e, 0xe6, 0x89, 0x66, 0x3b, 0xef, 0xe0, 0x72, 0x1e, 0x61, - 0xf0, 0x49, 0x86, 0x5c, 0xa3, 0xa3, 0xe0, 0xac, 0xce, 0x1f, 0x7e, 0xdf, 0x39, 0x1f, 0x1f, 0x74, - 0xa3, 0x4d, 0x99, 0xae, 0xa3, 0x72, 0x90, 0xe5, 0x69, 0x99, 0x22, 0x9c, 0xc6, 0x7f, 0xee, 0x83, - 0x0e, 0x6f, 0xff, 0xc8, 0xa2, 0x88, 
0x6e, 0x24, 0x7e, 0x03, 0xbd, 0xdc, 0x66, 0xd2, 0x62, 0x0e, - 0xeb, 0x9b, 0xa3, 0xcf, 0x83, 0x17, 0x6c, 0x50, 0x23, 0xc7, 0x1a, 0x6c, 0x33, 0x49, 0x0a, 0xc6, - 0xaf, 0xa0, 0xc7, 0x49, 0xb4, 0xb2, 0x34, 0x87, 0xf5, 0x3b, 0x23, 0xeb, 0x9a, 0xc8, 0x4b, 0xa2, - 0x15, 0x29, 0x0a, 0x3d, 0x30, 0xaa, 0x4a, 0xb2, 0xc8, 0xd2, 0x75, 0x21, 0xad, 0xa6, 0x52, 0x39, - 0xaf, 0xaa, 0x6a, 0x8e, 0x2e, 0x54, 0xbd, 0x21, 0xb4, 0x17, 0x52, 0xe6, 0xd3, 0xf5, 0xff, 0x14, - 0x4d, 0xd0, 0x92, 0x58, 0x59, 0x36, 0x48, 0x4b, 0x62, 0xfc, 0x00, 0x6f, 0xa2, 0x38, 0xce, 0x0b, - 0x4b, 0x73, 0x9a, 0x7d, 0x83, 0x0e, 0x43, 0xef, 0x3b, 0xe8, 0xd5, 0x3d, 0x1c, 0x82, 0x9e, 0x49, - 0x99, 0x2b, 0xbe, 0x33, 0xfa, 0x74, 0xed, 0xef, 0xf1, 0x32, 0x29, 0xb2, 0x77, 0x07, 0xc6, 0xb9, - 0x13, 0xfc, 0x01, 0xad, 0xa2, 0x8c, 0xca, 0x4d, 0x51, 0xc7, 0xe4, 0x5e, 0xbb, 0x71, 0xa4, 0x7d, - 0x45, 0x52, 0xad, 0x40, 0x1b, 0xe0, 0xd0, 0x05, 0xf2, 0xb6, 0x54, 0x89, 0xbd, 0xa3, 0xb3, 0x0d, - 0x22, 0xe8, 0x95, 0x5d, 0x95, 0x8a, 0x41, 0xaa, 0x77, 0xbf, 0x40, 0xe7, 0x2c, 0x74, 0x6c, 0x83, - 0xee, 0x4d, 0xc7, 0xbf, 0x79, 0x03, 0xdf, 0x43, 0xb7, 0xea, 0x42, 0x12, 0xfe, 0x62, 0x3e, 0xf3, - 0x05, 0x67, 0x6e, 0x02, 0xe6, 0xe5, 0x67, 0x6c, 0x81, 0x36, 0xff, 0xc5, 0x1b, 0xc8, 0xc1, 0x10, - 0xa1, 0xc2, 0x05, 0xd1, 0x9c, 0x78, 0x8c, 0x08, 0x66, 0xbd, 0x21, 0xf1, 0x73, 0xe9, 0x0b, 0x8f, - 0x4b, 0x44, 0xe8, 0x8a, 0x70, 0x32, 0xf6, 0x42, 0x12, 0x7f, 0x97, 0xc2, 0x0f, 0xf8, 0x8e, 0xe1, - 0x47, 0xe0, 0x22, 0x9c, 0xce, 0x02, 0x41, 0xb3, 0x93, 0xfa, 0x5e, 0x9b, 0x18, 0xbb, 0xbd, 0xcd, - 0x1e, 0xf7, 0x36, 0x7b, 0xda, 0xdb, 0xec, 0x39, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xe2, 0x93, 0x4e, - 0x61, 0x02, 0x00, 0x00, + Peer *Message_PeerInfo `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` } -func (m *Message) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Message_Dial) Reset() { + *x = Message_Dial{} + if 
protoimpl.UnsafeEnabled { + mi := &file_pb_autonat_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *Message) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *Message_Dial) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.DialResponse != nil { - { - size, err := m.DialResponse.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAutonat(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Dial != nil { - { - size, err := m.Dial.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAutonat(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Type != nil { - i = encodeVarintAutonat(dAtA, i, uint64(*m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} +func (*Message_Dial) ProtoMessage() {} -func (m *Message_PeerInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Message_PeerInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Message_PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarintAutonat(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - 
i-- - dAtA[i] = 0x12 +func (x *Message_Dial) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonat_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if m.Id != nil { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintAutonat(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Message_Dial) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Message_Dial) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Message_Dial.ProtoReflect.Descriptor instead. +func (*Message_Dial) Descriptor() ([]byte, []int) { + return file_pb_autonat_proto_rawDescGZIP(), []int{0, 1} } -func (m *Message_Dial) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Peer != nil { - { - size, err := m.Peer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAutonat(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *Message_Dial) GetPeer() *Message_PeerInfo { + if x != nil { + return x.Peer } - return len(dAtA) - i, nil + return nil } -func (m *Message_DialResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type Message_DialResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Message_DialResponse) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Status *Message_ResponseStatus `protobuf:"varint,1,opt,name=status,enum=autonat.pb.Message_ResponseStatus" json:"status,omitempty"` + StatusText *string `protobuf:"bytes,2,opt,name=statusText" json:"statusText,omitempty"` + Addr []byte `protobuf:"bytes,3,opt,name=addr" json:"addr,omitempty"` } -func (m *Message_DialResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Addr != nil { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarintAutonat(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x1a +func (x *Message_DialResponse) Reset() { + *x = Message_DialResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_autonat_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if m.StatusText != nil { - i -= len(*m.StatusText) - copy(dAtA[i:], *m.StatusText) - i = encodeVarintAutonat(dAtA, i, uint64(len(*m.StatusText))) - i-- - dAtA[i] = 0x12 - } - if m.Status != nil { - i = encodeVarintAutonat(dAtA, i, uint64(*m.Status)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil } -func encodeVarintAutonat(dAtA []byte, offset int, v uint64) int { - offset -= sovAutonat(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Message) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != nil { - n += 1 + sovAutonat(uint64(*m.Type)) - } - if m.Dial != nil { - l = m.Dial.Size() - n += 1 + l + sovAutonat(uint64(l)) - } - if m.DialResponse != nil { - l = m.DialResponse.Size() - n += 1 + l + sovAutonat(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *Message_DialResponse) String() string { + 
return protoimpl.X.MessageStringOf(x) } -func (m *Message_PeerInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != nil { - l = len(m.Id) - n += 1 + l + sovAutonat(uint64(l)) - } - if len(m.Addrs) > 0 { - for _, b := range m.Addrs { - l = len(b) - n += 1 + l + sovAutonat(uint64(l)) +func (*Message_DialResponse) ProtoMessage() {} + +func (x *Message_DialResponse) ProtoReflect() protoreflect.Message { + mi := &file_pb_autonat_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return mi.MessageOf(x) } -func (m *Message_Dial) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Peer != nil { - l = m.Peer.Size() - n += 1 + l + sovAutonat(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +// Deprecated: Use Message_DialResponse.ProtoReflect.Descriptor instead. 
+func (*Message_DialResponse) Descriptor() ([]byte, []int) { + return file_pb_autonat_proto_rawDescGZIP(), []int{0, 2} } -func (m *Message_DialResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - n += 1 + sovAutonat(uint64(*m.Status)) - } - if m.StatusText != nil { - l = len(*m.StatusText) - n += 1 + l + sovAutonat(uint64(l)) +func (x *Message_DialResponse) GetStatus() Message_ResponseStatus { + if x != nil && x.Status != nil { + return *x.Status } - if m.Addr != nil { - l = len(m.Addr) - n += 1 + l + sovAutonat(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovAutonat(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAutonat(x uint64) (n int) { - return sovAutonat(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return Message_OK } -func (m *Message) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var v Message_MessageType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= Message_MessageType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Type = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Dial", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Dial == nil { - m.Dial = &Message_Dial{} - } - if err := m.Dial.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DialResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DialResponse == nil { - m.DialResponse = &Message_DialResponse{} - } - if err := m.DialResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAutonat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Message_DialResponse) GetStatusText() string { + if x != nil && x.StatusText != nil { + return *x.StatusText } - return nil + return "" } -func (m *Message_PeerInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PeerInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PeerInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
- if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) - copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAutonat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Message_DialResponse) GetAddr() []byte { + if x != nil { + return x.Addr } return nil } -func (m *Message_Dial) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Dial: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Dial: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Peer == nil { - m.Peer = &Message_PeerInfo{} - } - if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAutonat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +var File_pb_autonat_proto protoreflect.FileDescriptor + +var file_pb_autonat_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x22, 0xb5, + 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x33, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, + 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x2c, 0x0a, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x04, 0x64, 0x69, 0x61, 0x6c, 0x12, 0x44, 0x0a, + 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0c, 0x64, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x1a, 0x30, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, + 0x61, 0x64, 0x64, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x04, 0x44, 0x69, 0x61, 0x6c, 0x12, 0x30, 0x0a, + 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x75, + 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 
0x62, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x50, 0x65, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x1a, + 0x7e, 0x0a, 0x0c, 0x44, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x22, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x65, 0x78, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x65, 0x78, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x61, + 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22, + 0x2a, 0x0a, 0x0b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, + 0x0a, 0x04, 0x44, 0x49, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x49, 0x41, 0x4c, + 0x5f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x01, 0x22, 0x69, 0x0a, 0x0e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, + 0x02, 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x5f, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x64, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x5f, 0x44, 0x49, 0x41, + 0x4c, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x53, 0x45, 0x44, 0x10, 0x65, 0x12, 0x12, 0x0a, 0x0d, 0x45, + 0x5f, 0x42, 0x41, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xc8, 0x01, 0x12, + 0x15, 0x0a, 0x10, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x10, 0xac, 0x02, } -func (m *Message_DialResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + +var ( + file_pb_autonat_proto_rawDescOnce sync.Once + file_pb_autonat_proto_rawDescData = file_pb_autonat_proto_rawDesc +) + +func file_pb_autonat_proto_rawDescGZIP() []byte { + file_pb_autonat_proto_rawDescOnce.Do(func() { + file_pb_autonat_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_autonat_proto_rawDescData) + }) + return file_pb_autonat_proto_rawDescData +} + +var file_pb_autonat_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_pb_autonat_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_pb_autonat_proto_goTypes = []interface{}{ + (Message_MessageType)(0), // 0: autonat.pb.Message.MessageType + (Message_ResponseStatus)(0), // 1: autonat.pb.Message.ResponseStatus + (*Message)(nil), // 2: autonat.pb.Message + (*Message_PeerInfo)(nil), // 3: autonat.pb.Message.PeerInfo + (*Message_Dial)(nil), // 4: autonat.pb.Message.Dial + (*Message_DialResponse)(nil), // 5: autonat.pb.Message.DialResponse +} +var file_pb_autonat_proto_depIdxs = []int32{ + 0, // 0: autonat.pb.Message.type:type_name -> autonat.pb.Message.MessageType + 4, // 1: autonat.pb.Message.dial:type_name -> autonat.pb.Message.Dial + 5, // 2: autonat.pb.Message.dialResponse:type_name -> autonat.pb.Message.DialResponse + 3, // 3: autonat.pb.Message.Dial.peer:type_name -> autonat.pb.Message.PeerInfo + 1, // 4: autonat.pb.Message.DialResponse.status:type_name -> autonat.pb.Message.ResponseStatus + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_pb_autonat_proto_init() } +func file_pb_autonat_proto_init() { + if 
File_pb_autonat_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_autonat_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DialResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DialResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + file_pb_autonat_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message_PeerInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - var v Message_ResponseStatus - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= Message_ResponseStatus(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Status = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StatusText", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.StatusText = &s - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAutonat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAutonat - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAutonat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) - if m.Addr == nil { - m.Addr = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAutonat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthAutonat - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAutonat(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAutonat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + file_pb_autonat_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message_Dial); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAutonat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAutonat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + file_pb_autonat_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message_DialResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - if length < 0 { - return 0, ErrInvalidLengthAutonat - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAutonat - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAutonat - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_autonat_proto_rawDesc, + NumEnums: 2, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_autonat_proto_goTypes, + DependencyIndexes: file_pb_autonat_proto_depIdxs, + EnumInfos: file_pb_autonat_proto_enumTypes, + MessageInfos: file_pb_autonat_proto_msgTypes, + }.Build() + File_pb_autonat_proto = out.File + file_pb_autonat_proto_rawDesc = nil + file_pb_autonat_proto_goTypes = nil + file_pb_autonat_proto_depIdxs = nil } - -var ( - ErrInvalidLengthAutonat = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAutonat = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAutonat = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go index 93f273cd..5bb2de06 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/proto.go @@ -2,11 +2,13 @@ package autonat import ( "github.com/libp2p/go-libp2p/core/peer" - pb "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" + "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" ma "github.com/multiformats/go-multiaddr" ) +//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. 
--go_opt=Mpb/autonat.proto=./pb pb/autonat.proto + // AutoNATProto identifies the autonat service protocol const AutoNATProto = "/libp2p/autonat/1.0.0" diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go index 10136a7d..98b421c9 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autonat/svc.go @@ -10,9 +10,10 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" - pb "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" + "github.com/libp2p/go-libp2p/p2p/host/autonat/pb" + + "github.com/libp2p/go-msgio/pbio" - "github.com/libp2p/go-msgio/protoio" ma "github.com/multiformats/go-multiaddr" ) @@ -69,8 +70,8 @@ func (as *autoNATService) handleStream(s network.Stream) { pid := s.Conn().RemotePeer() log.Debugf("New stream from %s", pid.Pretty()) - r := protoio.NewDelimitedReader(s, maxMsgSize) - w := protoio.NewDelimitedWriter(s) + r := pbio.NewDelimitedReader(s, maxMsgSize) + w := pbio.NewDelimitedWriter(s) var req pb.Message var res pb.Message @@ -99,6 +100,9 @@ func (as *autoNATService) handleStream(s network.Stream) { s.Reset() return } + if as.config.metricsTracer != nil { + as.config.metricsTracer.OutgoingDialResponse(res.GetDialResponse().GetStatus()) + } } func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Message_PeerInfo) *pb.Message_DialResponse { @@ -125,6 +129,9 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me // need to know their public IP address, and it needs to be different from our public IP // address. if as.config.dialPolicy.skipDial(obsaddr) { + if as.config.metricsTracer != nil { + as.config.metricsTracer.OutgoingDialRefused(dial_blocked) + } // Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, thus we can not rely on this error code. 
return newDialResponseError(pb.Message_E_DIAL_REFUSED, "refusing to dial peer with blocked observed address") } @@ -187,6 +194,9 @@ func (as *autoNATService) handleDial(p peer.ID, obsaddr ma.Multiaddr, mpi *pb.Me } if len(addrs) == 0 { + if as.config.metricsTracer != nil { + as.config.metricsTracer.OutgoingDialRefused(no_valid_address) + } // Note: versions < v0.20.0 return Message_E_DIAL_ERROR here, thus we can not rely on this error code. return newDialResponseError(pb.Message_E_DIAL_REFUSED, "no dialable addresses") } @@ -201,6 +211,9 @@ func (as *autoNATService) doDial(pi peer.AddrInfo) *pb.Message_DialResponse { if count >= as.config.throttlePeerMax || (as.config.throttleGlobalMax > 0 && as.globalReqs >= as.config.throttleGlobalMax) { as.mx.Unlock() + if as.config.metricsTracer != nil { + as.config.metricsTracer.OutgoingDialRefused(rate_limited) + } return newDialResponseError(pb.Message_E_DIAL_REFUSED, "too many dials") } as.reqs[pi.ID] = count + 1 diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go index e4e3568e..59007985 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/autorelay.go @@ -2,12 +2,14 @@ package autorelay import ( "context" + "errors" "sync" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" basic "github.com/libp2p/go-libp2p/p2p/host/basic" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" logging "github.com/ipfs/go-log/v2" ma "github.com/multiformats/go-multiaddr" @@ -29,6 +31,8 @@ type AutoRelay struct { host host.Host addrsF basic.AddrsFactory + + metricsTracer MetricsTracer } func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) { @@ -46,18 +50,22 @@ func NewAutoRelay(bhost *basic.BasicHost, opts ...Option) (*AutoRelay, error) { r.ctx, r.ctxCancel = 
context.WithCancel(context.Background()) r.conf = &conf r.relayFinder = newRelayFinder(bhost, conf.peerSource, &conf) + r.metricsTracer = &wrappedMetricsTracer{conf.metricsTracer} bhost.AddrsFactory = r.hostAddrs + return r, nil +} + +func (r *AutoRelay) Start() { r.refCount.Add(1) go func() { defer r.refCount.Done() r.background() }() - return r, nil } func (r *AutoRelay) background() { - subReachability, err := r.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged)) + subReachability, err := r.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("autorelay (background)")) if err != nil { log.Debug("failed to subscribe to the EvtLocalReachabilityChanged") return @@ -76,11 +84,17 @@ func (r *AutoRelay) background() { evt := ev.(event.EvtLocalReachabilityChanged) switch evt.Reachability { case network.ReachabilityPrivate, network.ReachabilityUnknown: - if err := r.relayFinder.Start(); err != nil { + err := r.relayFinder.Start() + if errors.Is(err, errAlreadyRunning) { + log.Debug("tried to start already running relay finder") + } else if err != nil { log.Errorw("failed to start relay finder", "error", err) + } else { + r.metricsTracer.RelayFinderStatus(true) } case network.ReachabilityPublic: r.relayFinder.Stop() + r.metricsTracer.RelayFinderStatus(false) } r.mx.Lock() r.status = evt.Reachability diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go index 740ca236..c6bd9c57 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/host.go @@ -14,6 +14,10 @@ func (h *AutoRelayHost) Close() error { return h.Host.Close() } +func (h *AutoRelayHost) Start() { + h.ar.Start() +} + func NewAutoRelayHost(h host.Host, ar *AutoRelay) *AutoRelayHost { return &AutoRelayHost{Host: h, ar: ar} } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go 
b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go new file mode 100644 index 00000000..8028655b --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/metrics.go @@ -0,0 +1,373 @@ +package autorelay + +import ( + "errors" + + "github.com/libp2p/go-libp2p/p2p/metricshelper" + "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client" + pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb" + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "libp2p_autorelay" + +var ( + status = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "status", + Help: "relay finder active", + }) + reservationsOpenedTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "reservations_opened_total", + Help: "Reservations Opened", + }, + ) + reservationsClosedTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "reservations_closed_total", + Help: "Reservations Closed", + }, + ) + reservationRequestsOutcomeTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "reservation_requests_outcome_total", + Help: "Reservation Request Outcome", + }, + []string{"request_type", "outcome"}, + ) + + relayAddressesUpdatedTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "relay_addresses_updated_total", + Help: "Relay Addresses Updated Count", + }, + ) + relayAddressesCount = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "relay_addresses_count", + Help: "Relay Addresses Count", + }, + ) + + candidatesCircuitV2SupportTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "candidates_circuit_v2_support_total", + Help: "Candidiates supporting circuit v2", + }, + []string{"support"}, + ) + candidatesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + 
Namespace: metricNamespace, + Name: "candidates_total", + Help: "Candidates Total", + }, + []string{"type"}, + ) + candLoopState = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "candidate_loop_state", + Help: "Candidate Loop State", + }, + ) + + scheduledWorkTime = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "scheduled_work_time", + Help: "Scheduled Work Times", + }, + []string{"work_type"}, + ) + + desiredReservations = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "desired_reservations", + Help: "Desired Reservations", + }, + ) + + collectors = []prometheus.Collector{ + status, + reservationsOpenedTotal, + reservationsClosedTotal, + reservationRequestsOutcomeTotal, + relayAddressesUpdatedTotal, + relayAddressesCount, + candidatesCircuitV2SupportTotal, + candidatesTotal, + candLoopState, + scheduledWorkTime, + desiredReservations, + } +) + +type candidateLoopState int + +const ( + peerSourceRateLimited candidateLoopState = iota + waitingOnPeerChan + waitingForTrigger + stopped +) + +// MetricsTracer is the interface for tracking metrics for autorelay +type MetricsTracer interface { + RelayFinderStatus(isActive bool) + + ReservationEnded(cnt int) + ReservationOpened(cnt int) + ReservationRequestFinished(isRefresh bool, err error) + + RelayAddressCount(int) + RelayAddressUpdated() + + CandidateChecked(supportsCircuitV2 bool) + CandidateAdded(cnt int) + CandidateRemoved(cnt int) + CandidateLoopState(state candidateLoopState) + + ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) + + DesiredReservations(int) +} + +type metricsTracer struct{} + +var _ MetricsTracer = &metricsTracer{} + +type metricsTracerSetting struct { + reg prometheus.Registerer +} + +type MetricsTracerOption func(*metricsTracerSetting) + +func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption { + return func(s *metricsTracerSetting) { + if reg != nil { + s.reg = reg + 
} + } +} + +func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { + setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer} + for _, opt := range opts { + opt(setting) + } + metricshelper.RegisterCollectors(setting.reg, collectors...) + + // Initialise these counters to 0 otherwise the first reservation requests aren't handled + // correctly when using promql increse function + reservationRequestsOutcomeTotal.WithLabelValues("refresh", "success") + reservationRequestsOutcomeTotal.WithLabelValues("new", "success") + candidatesCircuitV2SupportTotal.WithLabelValues("yes") + candidatesCircuitV2SupportTotal.WithLabelValues("no") + return &metricsTracer{} +} + +func (mt *metricsTracer) RelayFinderStatus(isActive bool) { + if isActive { + status.Set(1) + } else { + status.Set(0) + } +} + +func (mt *metricsTracer) ReservationEnded(cnt int) { + reservationsClosedTotal.Add(float64(cnt)) +} + +func (mt *metricsTracer) ReservationOpened(cnt int) { + reservationsOpenedTotal.Add(float64(cnt)) +} + +func (mt *metricsTracer) ReservationRequestFinished(isRefresh bool, err error) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + if isRefresh { + *tags = append(*tags, "refresh") + } else { + *tags = append(*tags, "new") + } + *tags = append(*tags, getReservationRequestStatus(err)) + reservationRequestsOutcomeTotal.WithLabelValues(*tags...).Inc() + + if !isRefresh && err == nil { + reservationsOpenedTotal.Inc() + } +} + +func (mt *metricsTracer) RelayAddressUpdated() { + relayAddressesUpdatedTotal.Inc() +} + +func (mt *metricsTracer) RelayAddressCount(cnt int) { + relayAddressesCount.Set(float64(cnt)) +} + +func (mt *metricsTracer) CandidateChecked(supportsCircuitV2 bool) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + if supportsCircuitV2 { + *tags = append(*tags, "yes") + } else { + *tags = append(*tags, "no") + } + candidatesCircuitV2SupportTotal.WithLabelValues(*tags...).Inc() 
+} + +func (mt *metricsTracer) CandidateAdded(cnt int) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, "added") + candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt)) +} + +func (mt *metricsTracer) CandidateRemoved(cnt int) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, "removed") + candidatesTotal.WithLabelValues(*tags...).Add(float64(cnt)) +} + +func (mt *metricsTracer) CandidateLoopState(state candidateLoopState) { + candLoopState.Set(float64(state)) +} + +func (mt *metricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, "allowed peer source call") + scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextAllowedCallToPeerSource.Unix())) + *tags = (*tags)[:0] + + *tags = append(*tags, "reservation refresh") + scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextRefresh.Unix())) + *tags = (*tags)[:0] + + *tags = append(*tags, "clear backoff") + scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextBackoff.Unix())) + *tags = (*tags)[:0] + + *tags = append(*tags, "old candidate check") + scheduledWorkTime.WithLabelValues(*tags...).Set(float64(scheduledWork.nextOldCandidateCheck.Unix())) +} + +func (mt *metricsTracer) DesiredReservations(cnt int) { + desiredReservations.Set(float64(cnt)) +} + +func getReservationRequestStatus(err error) string { + if err == nil { + return "success" + } + + status := "err other" + var re client.ReservationError + if errors.As(err, &re) { + switch re.Status { + case pbv2.Status_CONNECTION_FAILED: + return "connection failed" + case pbv2.Status_MALFORMED_MESSAGE: + return "malformed message" + case pbv2.Status_RESERVATION_REFUSED: + return "reservation refused" + case pbv2.Status_PERMISSION_DENIED: + return "permission 
denied" + case pbv2.Status_RESOURCE_LIMIT_EXCEEDED: + return "resource limit exceeded" + } + } + return status +} + +// wrappedMetricsTracer wraps MetricsTracer and ignores all calls when mt is nil +type wrappedMetricsTracer struct { + mt MetricsTracer +} + +var _ MetricsTracer = &wrappedMetricsTracer{} + +func (mt *wrappedMetricsTracer) RelayFinderStatus(isActive bool) { + if mt.mt != nil { + mt.mt.RelayFinderStatus(isActive) + } +} + +func (mt *wrappedMetricsTracer) ReservationEnded(cnt int) { + if mt.mt != nil { + mt.mt.ReservationEnded(cnt) + } +} + +func (mt *wrappedMetricsTracer) ReservationOpened(cnt int) { + if mt.mt != nil { + mt.mt.ReservationOpened(cnt) + } +} + +func (mt *wrappedMetricsTracer) ReservationRequestFinished(isRefresh bool, err error) { + if mt.mt != nil { + mt.mt.ReservationRequestFinished(isRefresh, err) + } +} + +func (mt *wrappedMetricsTracer) RelayAddressUpdated() { + if mt.mt != nil { + mt.mt.RelayAddressUpdated() + } +} + +func (mt *wrappedMetricsTracer) RelayAddressCount(cnt int) { + if mt.mt != nil { + mt.mt.RelayAddressCount(cnt) + } +} + +func (mt *wrappedMetricsTracer) CandidateChecked(supportsCircuitV2 bool) { + if mt.mt != nil { + mt.mt.CandidateChecked(supportsCircuitV2) + } +} + +func (mt *wrappedMetricsTracer) CandidateAdded(cnt int) { + if mt.mt != nil { + mt.mt.CandidateAdded(cnt) + } +} + +func (mt *wrappedMetricsTracer) CandidateRemoved(cnt int) { + if mt.mt != nil { + mt.mt.CandidateRemoved(cnt) + } +} + +func (mt *wrappedMetricsTracer) ScheduledWorkUpdated(scheduledWork *scheduledWorkTimes) { + if mt.mt != nil { + mt.mt.ScheduledWorkUpdated(scheduledWork) + } +} + +func (mt *wrappedMetricsTracer) DesiredReservations(cnt int) { + if mt.mt != nil { + mt.mt.DesiredReservations(cnt) + } +} + +func (mt *wrappedMetricsTracer) CandidateLoopState(state candidateLoopState) { + if mt.mt != nil { + mt.mt.CandidateLoopState(state) + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go 
b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go index fd132849..26ba9201 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/options.go @@ -6,13 +6,24 @@ import ( "time" "github.com/libp2p/go-libp2p/core/peer" - - "github.com/benbjohnson/clock" ) +// AutoRelay will call this function when it needs new candidates because it is +// not connected to the desired number of relays or we get disconnected from one +// of the relays. Implementations must send *at most* numPeers, and close the +// channel when they don't intend to provide any more peers. AutoRelay will not +// call the callback again until the channel is closed. Implementations should +// send new peers, but may send peers they sent before. AutoRelay implements a +// per-peer backoff (see WithBackoff). See WithMinInterval for setting the +// minimum interval between calls to the callback. The context.Context passed +// may be canceled when AutoRelay feels satisfied, it will be canceled when the +// node is shutting down. If the context is canceled you MUST close the output +// channel at some point. 
+type PeerSource func(ctx context.Context, num int) <-chan peer.AddrInfo + type config struct { - clock clock.Clock - peerSource func(ctx context.Context, num int) <-chan peer.AddrInfo + clock ClockWithInstantTimer + peerSource PeerSource // minimum interval used to call the peerSource callback minInterval time.Duration // see WithMinCandidates @@ -29,17 +40,19 @@ type config struct { // see WithMaxCandidateAge maxCandidateAge time.Duration setMinCandidates bool - enableCircuitV1 bool + // see WithMetricsTracer + metricsTracer MetricsTracer } var defaultConfig = config{ - clock: clock.New(), + clock: RealClock{}, minCandidates: 4, maxCandidates: 20, bootDelay: 3 * time.Minute, backoff: time.Hour, desiredRelays: 2, maxCandidateAge: 30 * time.Minute, + minInterval: 30 * time.Second, } var ( @@ -65,7 +78,7 @@ func WithStaticRelays(static []peer.AddrInfo) Option { c <- static[i] } return c - }, 30*time.Second)(c) + })(c) WithMinCandidates(len(static))(c) WithMaxCandidates(len(static))(c) WithNumRelays(len(static))(c) @@ -75,23 +88,12 @@ func WithStaticRelays(static []peer.AddrInfo) Option { } // WithPeerSource defines a callback for AutoRelay to query for more relay candidates. -// AutoRelay will call this function when it needs new candidates is connected to the desired number of -// relays, and it has enough candidates (in case we get disconnected from one of the relays). -// Implementations must send *at most* numPeers, and close the channel when they don't intend to provide -// any more peers. -// AutoRelay will not call the callback again until the channel is closed. -// Implementations should send new peers, but may send peers they sent before. AutoRelay implements -// a per-peer backoff (see WithBackoff). -// minInterval is the minimum interval this callback is called with, even if AutoRelay needs new candidates. -// The context.Context passed MAY be canceled when AutoRelay feels satisfied, it will be canceled when the node is shutting down. 
-// If the channel is canceled you MUST close the output channel at some point. -func WithPeerSource(f func(ctx context.Context, numPeers int) <-chan peer.AddrInfo, minInterval time.Duration) Option { +func WithPeerSource(f PeerSource) Option { return func(c *config) error { if c.peerSource != nil { return errAlreadyHavePeerSource } c.peerSource = f - c.minInterval = minInterval return nil } } @@ -148,14 +150,6 @@ func WithBackoff(d time.Duration) Option { } } -// WithCircuitV1Support enables support for circuit v1 relays. -func WithCircuitV1Support() Option { - return func(c *config) error { - c.enableCircuitV1 = true - return nil - } -} - // WithMaxCandidateAge sets the maximum age of a candidate. // When we are connected to the desired number of relays, we don't ask the peer source for new candidates. // This can lead to AutoRelay's candidate list becoming outdated, and means we won't be able @@ -168,9 +162,72 @@ func WithMaxCandidateAge(d time.Duration) Option { } } -func WithClock(cl clock.Clock) Option { +// InstantTimer is a timer that triggers at some instant rather than some duration +type InstantTimer interface { + Reset(d time.Time) bool + Stop() bool + Ch() <-chan time.Time +} + +// ClockWithInstantTimer is a clock that can create timers that trigger at some +// instant rather than some duration +type ClockWithInstantTimer interface { + Now() time.Time + Since(t time.Time) time.Duration + InstantTimer(when time.Time) InstantTimer +} + +type RealTimer struct{ t *time.Timer } + +var _ InstantTimer = (*RealTimer)(nil) + +func (t RealTimer) Ch() <-chan time.Time { + return t.t.C +} + +func (t RealTimer) Reset(d time.Time) bool { + return t.t.Reset(time.Until(d)) +} + +func (t RealTimer) Stop() bool { + return t.t.Stop() +} + +type RealClock struct{} + +var _ ClockWithInstantTimer = RealClock{} + +func (RealClock) Now() time.Time { + return time.Now() +} +func (RealClock) Since(t time.Time) time.Duration { + return time.Since(t) +} +func (RealClock) 
InstantTimer(when time.Time) InstantTimer { + t := time.NewTimer(time.Until(when)) + return &RealTimer{t} +} + +func WithClock(cl ClockWithInstantTimer) Option { return func(c *config) error { c.clock = cl return nil } } + +// WithMinInterval sets the minimum interval after which peerSource callback will be called for more +// candidates even if AutoRelay needs new candidates. +func WithMinInterval(interval time.Duration) Option { + return func(c *config) error { + c.minInterval = interval + return nil + } +} + +// WithMetricsTracer configures autorelay to use mt to track metrics +func WithMetricsTracer(mt MetricsTracer) Option { + return func(c *config) error { + c.metricsTracer = mt + return nil + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go index 851d1422..3133b7a5 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/relay_finder.go @@ -14,7 +14,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" basic "github.com/libp2p/go-libp2p/p2p/host/basic" - relayv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" circuitv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client" circuitv2_proto "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto" @@ -22,13 +22,10 @@ import ( manet "github.com/multiformats/go-multiaddr/net" ) -const ( - protoIDv1 = string(relayv1.ProtoID) - protoIDv2 = string(circuitv2_proto.ProtoIDv2Hop) -) +const protoIDv2 = circuitv2_proto.ProtoIDv2Hop // Terminology: -// Candidate: Once we connect to a node and it supports (v1 / v2) relay protocol, +// Candidate: Once we connect to a node and it supports relay protocol, // we call it a candidate, and consider using it as a relay. 
// Relay: Out of the list of candidates, we select a relay to connect to. // Currently, we just randomly select a candidate, but we can employ more sophisticated @@ -59,7 +56,7 @@ type relayFinder struct { ctxCancel context.CancelFunc ctxCancelMx sync.Mutex - peerSource func(context.Context, int) <-chan peer.AddrInfo + peerSource PeerSource candidateFound chan struct{} // receives every time we find a new relay candidate candidateMx sync.Mutex @@ -76,13 +73,19 @@ type relayFinder struct { relayUpdated chan struct{} relayMx sync.Mutex - relays map[peer.ID]*circuitv2.Reservation // rsvp will be nil if it is a v1 relay + relays map[peer.ID]*circuitv2.Reservation cachedAddrs []ma.Multiaddr cachedAddrsExpiry time.Time + + // A channel that triggers a run of `runScheduledWork`. + triggerRunScheduledWork chan struct{} + metricsTracer MetricsTracer } -func newRelayFinder(host *basic.BasicHost, peerSource func(context.Context, int) <-chan peer.AddrInfo, conf *config) *relayFinder { +var errAlreadyRunning = errors.New("relayFinder already running") + +func newRelayFinder(host *basic.BasicHost, peerSource PeerSource, conf *config) *relayFinder { if peerSource == nil { panic("Can not create a new relayFinder. Need a Peer Source fn or a list of static relays. 
Refer to the documentation around `libp2p.EnableAutoRelay`") } @@ -97,16 +100,27 @@ func newRelayFinder(host *basic.BasicHost, peerSource func(context.Context, int) candidateFound: make(chan struct{}, 1), maybeConnectToRelayTrigger: make(chan struct{}, 1), maybeRequestNewCandidates: make(chan struct{}, 1), + triggerRunScheduledWork: make(chan struct{}, 1), relays: make(map[peer.ID]*circuitv2.Reservation), relayUpdated: make(chan struct{}, 1), + metricsTracer: &wrappedMetricsTracer{conf.metricsTracer}, } } +type scheduledWorkTimes struct { + leastFrequentInterval time.Duration + nextRefresh time.Time + nextBackoff time.Time + nextOldCandidateCheck time.Time + nextAllowedCallToPeerSource time.Time +} + func (rf *relayFinder) background(ctx context.Context) { + peerSourceRateLimiter := make(chan struct{}, 1) rf.refCount.Add(1) go func() { defer rf.refCount.Done() - rf.findNodes(ctx) + rf.findNodes(ctx, peerSourceRateLimiter) }() rf.refCount.Add(1) @@ -115,26 +129,42 @@ func (rf *relayFinder) background(ctx context.Context) { rf.handleNewCandidates(ctx) }() - subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged)) + subConnectedness, err := rf.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged), eventbus.Name("autorelay (relay finder)")) if err != nil { log.Error("failed to subscribe to the EvtPeerConnectednessChanged") return } defer subConnectedness.Close() - bootDelayTimer := rf.conf.clock.Timer(rf.conf.bootDelay) + now := rf.conf.clock.Now() + bootDelayTimer := rf.conf.clock.InstantTimer(now.Add(rf.conf.bootDelay)) defer bootDelayTimer.Stop() - refreshTicker := rf.conf.clock.Ticker(rsvpRefreshInterval) - defer refreshTicker.Stop() - backoffTicker := rf.conf.clock.Ticker(rf.conf.backoff / 5) - defer backoffTicker.Stop() - oldCandidateTicker := rf.conf.clock.Ticker(rf.conf.maxCandidateAge / 5) - defer oldCandidateTicker.Stop() - for { - // when true, we need to identify push - var push bool + // This is the least 
frequent event. It's our fallback timer if we don't have any other work to do. + leastFrequentInterval := rf.conf.minInterval + // Check if leastFrequentInterval is 0 to avoid busy looping + if rf.conf.backoff > leastFrequentInterval || leastFrequentInterval == 0 { + leastFrequentInterval = rf.conf.backoff + } + if rf.conf.maxCandidateAge > leastFrequentInterval || leastFrequentInterval == 0 { + leastFrequentInterval = rf.conf.maxCandidateAge + } + if rsvpRefreshInterval > leastFrequentInterval || leastFrequentInterval == 0 { + leastFrequentInterval = rsvpRefreshInterval + } + + scheduledWork := &scheduledWorkTimes{ + leastFrequentInterval: leastFrequentInterval, + nextRefresh: now.Add(rsvpRefreshInterval), + nextBackoff: now.Add(rf.conf.backoff), + nextOldCandidateCheck: now.Add(rf.conf.maxCandidateAge), + nextAllowedCallToPeerSource: now.Add(-time.Second), // allow immediately + } + workTimer := rf.conf.clock.InstantTimer(rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter)) + defer workTimer.Stop() + + for { select { case ev, ok := <-subConnectedness.Out(): if !ok { @@ -144,6 +174,8 @@ func (rf *relayFinder) background(ctx context.Context) { if evt.Connectedness != network.NotConnected { continue } + push := false + rf.relayMx.Lock() if rf.usingRelay(evt.Peer) { // we were disconnected from a relay log.Debugw("disconnected from relay", "id", evt.Peer) @@ -153,85 +185,182 @@ func (rf *relayFinder) background(ctx context.Context) { push = true } rf.relayMx.Unlock() + + if push { + rf.clearCachedAddrsAndSignalAddressChange() + rf.metricsTracer.ReservationEnded(1) + } case <-rf.candidateFound: rf.notifyMaybeConnectToRelay() - case <-bootDelayTimer.C: + case <-bootDelayTimer.Ch(): rf.notifyMaybeConnectToRelay() case <-rf.relayUpdated: - push = true - case now := <-refreshTicker.C: - push = rf.refreshReservations(ctx, now) - case now := <-backoffTicker.C: - rf.candidateMx.Lock() - for id, t := range rf.backoff { - if !t.Add(rf.conf.backoff).After(now) { 
- log.Debugw("removing backoff for node", "id", id) - delete(rf.backoff, id) - } - } - rf.candidateMx.Unlock() - case now := <-oldCandidateTicker.C: - var deleted bool - rf.candidateMx.Lock() - for id, cand := range rf.candidates { - if !cand.added.Add(rf.conf.maxCandidateAge).After(now) { - deleted = true - log.Debugw("deleting candidate due to age", "id", id) - delete(rf.candidates, id) - } - } - rf.candidateMx.Unlock() - if deleted { - rf.notifyMaybeNeedNewCandidates() - } + rf.clearCachedAddrsAndSignalAddressChange() + case now := <-workTimer.Ch(): + // Note: `now` is not guaranteed to be the current time. It's the time + // that the timer was fired. This is okay because we'll schedule + // future work at a specific time. + nextTime := rf.runScheduledWork(ctx, now, scheduledWork, peerSourceRateLimiter) + workTimer.Reset(nextTime) + case <-rf.triggerRunScheduledWork: + // Ignore the next time because we aren't scheduling any future work here + _ = rf.runScheduledWork(ctx, rf.conf.clock.Now(), scheduledWork, peerSourceRateLimiter) case <-ctx.Done(): return } + } +} - if push { - rf.relayMx.Lock() - rf.cachedAddrs = nil - rf.relayMx.Unlock() - rf.host.SignalAddressChange() +func (rf *relayFinder) clearCachedAddrsAndSignalAddressChange() { + rf.relayMx.Lock() + rf.cachedAddrs = nil + rf.relayMx.Unlock() + rf.host.SignalAddressChange() + + rf.metricsTracer.RelayAddressUpdated() +} + +func (rf *relayFinder) runScheduledWork(ctx context.Context, now time.Time, scheduledWork *scheduledWorkTimes, peerSourceRateLimiter chan<- struct{}) time.Time { + nextTime := now.Add(scheduledWork.leastFrequentInterval) + + if now.After(scheduledWork.nextRefresh) { + scheduledWork.nextRefresh = now.Add(rsvpRefreshInterval) + if rf.refreshReservations(ctx, now) { + rf.clearCachedAddrsAndSignalAddressChange() + } + } + + if now.After(scheduledWork.nextBackoff) { + scheduledWork.nextBackoff = rf.clearBackoff(now) + } + + if now.After(scheduledWork.nextOldCandidateCheck) { + 
scheduledWork.nextOldCandidateCheck = rf.clearOldCandidates(now) + } + + if now.After(scheduledWork.nextAllowedCallToPeerSource) { + select { + case peerSourceRateLimiter <- struct{}{}: + scheduledWork.nextAllowedCallToPeerSource = now.Add(rf.conf.minInterval) + if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) { + nextTime = scheduledWork.nextAllowedCallToPeerSource + } + default: + } + } else { + // We still need to schedule this work if it's sooner than nextTime + if scheduledWork.nextAllowedCallToPeerSource.Before(nextTime) { + nextTime = scheduledWork.nextAllowedCallToPeerSource + } + } + + // Find the next time we need to run scheduled work. + if scheduledWork.nextRefresh.Before(nextTime) { + nextTime = scheduledWork.nextRefresh + } + if scheduledWork.nextBackoff.Before(nextTime) { + nextTime = scheduledWork.nextBackoff + } + if scheduledWork.nextOldCandidateCheck.Before(nextTime) { + nextTime = scheduledWork.nextOldCandidateCheck + } + if nextTime == now { + // Only happens in CI with a mock clock + nextTime = nextTime.Add(1) // avoids an infinite loop + } + + rf.metricsTracer.ScheduledWorkUpdated(scheduledWork) + + return nextTime +} + +// clearOldCandidates clears old candidates from the map. Returns the next time +// to run this function. +func (rf *relayFinder) clearOldCandidates(now time.Time) time.Time { + // If we don't have any candidates, we should run this again in rf.conf.maxCandidateAge. + nextTime := now.Add(rf.conf.maxCandidateAge) + + var deleted bool + rf.candidateMx.Lock() + defer rf.candidateMx.Unlock() + for id, cand := range rf.candidates { + expiry := cand.added.Add(rf.conf.maxCandidateAge) + if expiry.After(now) { + if expiry.Before(nextTime) { + nextTime = expiry + } + } else { + log.Debugw("deleting candidate due to age", "id", id) + deleted = true + rf.removeCandidate(id) + } + } + if deleted { + rf.notifyMaybeNeedNewCandidates() + } + + return nextTime +} + +// clearBackoff clears old backoff entries from the map. 
Returns the next time +// to run this function. +func (rf *relayFinder) clearBackoff(now time.Time) time.Time { + nextTime := now.Add(rf.conf.backoff) + + rf.candidateMx.Lock() + defer rf.candidateMx.Unlock() + for id, t := range rf.backoff { + expiry := t.Add(rf.conf.backoff) + if expiry.After(now) { + if expiry.Before(nextTime) { + nextTime = expiry + } + } else { + log.Debugw("removing backoff for node", "id", id) + delete(rf.backoff, id) } } + + return nextTime } // findNodes accepts nodes from the channel and tests if they support relaying. // It is run on both public and private nodes. // It garbage collects old entries, so that nodes doesn't overflow. // This makes sure that as soon as we need to find relay candidates, we have them available. -func (rf *relayFinder) findNodes(ctx context.Context) { - peerChan := rf.peerSource(ctx, rf.conf.maxCandidates) +// peerSourceRateLimiter is used to limit how often we call the peer source. +func (rf *relayFinder) findNodes(ctx context.Context, peerSourceRateLimiter <-chan struct{}) { + var peerChan <-chan peer.AddrInfo var wg sync.WaitGroup - lastCallToPeerSource := rf.conf.clock.Now() - - timer := newTimer(rf.conf.clock) for { rf.candidateMx.Lock() numCandidates := len(rf.candidates) rf.candidateMx.Unlock() - if peerChan == nil { - now := rf.conf.clock.Now() - nextAllowedCallToPeerSource := lastCallToPeerSource.Add(rf.conf.minInterval).Sub(now) - if numCandidates < rf.conf.minCandidates { - log.Debugw("not enough candidates. 
Resetting timer", "num", numCandidates, "desired", rf.conf.minCandidates) - timer.Reset(nextAllowedCallToPeerSource) + if peerChan == nil && numCandidates < rf.conf.minCandidates { + rf.metricsTracer.CandidateLoopState(peerSourceRateLimited) + + select { + case <-peerSourceRateLimiter: + peerChan = rf.peerSource(ctx, rf.conf.maxCandidates) + select { + case rf.triggerRunScheduledWork <- struct{}{}: + default: + } + case <-ctx.Done(): + return } } + if peerChan == nil { + rf.metricsTracer.CandidateLoopState(waitingForTrigger) + } else { + rf.metricsTracer.CandidateLoopState(waitingOnPeerChan) + } + select { case <-rf.maybeRequestNewCandidates: continue - case now := <-timer.Chan(): - timer.SetRead() - if peerChan != nil { - // We're still reading peers from the peerChan. No need to query for more peers now. - continue - } - lastCallToPeerSource = now - peerChan = rf.peerSource(ctx, rf.conf.maxCandidates) case pi, ok := <-peerChan: if !ok { wg.Wait() @@ -261,6 +390,7 @@ func (rf *relayFinder) findNodes(ctx context.Context) { } }() case <-ctx.Done(): + rf.metricsTracer.CandidateLoopState(stopped) return } } @@ -287,7 +417,7 @@ func (rf *relayFinder) notifyNewCandidate() { } } -// handleNewNode tests if a peer supports circuit v1 or v2. +// handleNewNode tests if a peer supports circuit v2. // This method is only run on private nodes. // If a peer does, it is added to the candidates map. // Note that just supporting the protocol doesn't guarantee that we can also obtain a reservation. 
@@ -304,24 +434,31 @@ func (rf *relayFinder) handleNewNode(ctx context.Context, pi peer.AddrInfo) (add supportsV2, err := rf.tryNode(ctx, pi) if err != nil { log.Debugf("node %s not accepted as a candidate: %s", pi.ID, err) + if err == errProtocolNotSupported { + rf.metricsTracer.CandidateChecked(false) + } return false } + rf.metricsTracer.CandidateChecked(true) + rf.candidateMx.Lock() if len(rf.candidates) > rf.conf.maxCandidates { rf.candidateMx.Unlock() return false } log.Debugw("node supports relay protocol", "peer", pi.ID, "supports circuit v2", supportsV2) - rf.candidates[pi.ID] = &candidate{ + rf.addCandidate(&candidate{ added: rf.conf.clock.Now(), ai: pi, supportsRelayV2: supportsV2, - } + }) rf.candidateMx.Unlock() return true } -// tryNode checks if a peer actually supports either circuit v1 or circuit v2. +var errProtocolNotSupported = errors.New("doesn't speak circuit v2") + +// tryNode checks if a peer actually supports circuit v2. +// It does not modify any internal state.
func (rf *relayFinder) tryNode(ctx context.Context, pi peer.AddrInfo) (supportsRelayV2 bool, err error) { if err := rf.host.Connect(ctx, pi); err != nil { @@ -356,61 +493,26 @@ func (rf *relayFinder) tryNode(ctx context.Context, pi peer.AddrInfo) (supportsR return false, ctx.Err() } - protos, err := rf.host.Peerstore().SupportsProtocols(pi.ID, protoIDv1, protoIDv2) + protos, err := rf.host.Peerstore().SupportsProtocols(pi.ID, protoIDv2) if err != nil { return false, fmt.Errorf("error checking relay protocol support for peer %s: %w", pi.ID, err) } - - // If the node speaks both, prefer circuit v2 - var maybeSupportsV1, supportsV2 bool - for _, proto := range protos { - switch proto { - case protoIDv1: - maybeSupportsV1 = true - case protoIDv2: - supportsV2 = true - } - } - - if supportsV2 { - return true, nil - } - - if !rf.conf.enableCircuitV1 && !supportsV2 { - return false, errors.New("doesn't speak circuit v2") - } - if !maybeSupportsV1 && !supportsV2 { - return false, errors.New("doesn't speak circuit v1 or v2") + if len(protos) == 0 { + return false, errProtocolNotSupported } - - // The node *may* support circuit v1. - supportsV1, err := relayv1.CanHop(ctx, rf.host, pi.ID) - if err != nil { - return false, fmt.Errorf("CanHop failed: %w", err) - } - if !supportsV1 { - return false, errors.New("doesn't speak circuit v1 or v2") - } - return false, nil + return true, nil } // When a new node that could be a relay is found, we receive a notification on the maybeConnectToRelayTrigger chan. // This function makes sure that we only run one instance of maybeConnectToRelay at once, and buffers // exactly one more trigger event to run maybeConnectToRelay. 
func (rf *relayFinder) handleNewCandidates(ctx context.Context) { - sem := make(chan struct{}, 1) for { select { case <-ctx.Done(): return case <-rf.maybeConnectToRelayTrigger: - select { - case <-ctx.Done(): - return - case sem <- struct{}{}: - } rf.maybeConnectToRelay(ctx) - <-sem } } } @@ -448,7 +550,7 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) { rf.relayMx.Unlock() if usingRelay { rf.candidateMx.Lock() - delete(rf.candidates, id) + rf.removeCandidate(id) rf.candidateMx.Unlock() rf.notifyMaybeNeedNewCandidates() continue @@ -457,6 +559,7 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) { if err != nil { log.Debugw("failed to connect to relay", "peer", id, "error", err) rf.notifyMaybeNeedNewCandidates() + rf.metricsTracer.ReservationRequestFinished(false, err) continue } log.Debugw("adding new relay", "id", id) @@ -473,6 +576,8 @@ func (rf *relayFinder) maybeConnectToRelay(ctx context.Context) { default: } + rf.metricsTracer.ReservationRequestFinished(false, nil) + if numRelays >= rf.conf.desiredRelays { break } @@ -491,7 +596,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci if rf.host.Network().Connectedness(id) != network.Connected { if err := rf.host.Connect(ctx, cand.ai); err != nil { rf.candidateMx.Lock() - delete(rf.candidates, cand.ai.ID) + rf.removeCandidate(cand.ai.ID) rf.candidateMx.Unlock() return nil, fmt.Errorf("failed to connect: %w", err) } @@ -508,7 +613,7 @@ func (rf *relayFinder) connectToRelay(ctx context.Context, cand *candidate) (*ci } } rf.candidateMx.Lock() - delete(rf.candidates, id) + rf.removeCandidate(id) rf.candidateMx.Unlock() return rsvp, err } @@ -519,15 +624,17 @@ func (rf *relayFinder) refreshReservations(ctx context.Context, now time.Time) b // find reservations about to expire and refresh them in parallel g := new(errgroup.Group) for p, rsvp := range rf.relays { - if rsvp == nil { // this is a circuit v1 relay, there is no reservation - continue - } 
if now.Add(rsvpExpirationSlack).Before(rsvp.Expiration) { continue } p := p - g.Go(func() error { return rf.refreshRelayReservation(ctx, p) }) + g.Go(func() error { + err := rf.refreshRelayReservation(ctx, p) + rf.metricsTracer.ReservationRequestFinished(true, err) + + return err + }) } rf.relayMx.Unlock() @@ -539,19 +646,22 @@ func (rf *relayFinder) refreshRelayReservation(ctx context.Context, p peer.ID) e rsvp, err := circuitv2.Reserve(ctx, rf.host, peer.AddrInfo{ID: p}) rf.relayMx.Lock() - defer rf.relayMx.Unlock() - if err != nil { log.Debugw("failed to refresh relay slot reservation", "relay", p, "error", err) - + _, exists := rf.relays[p] delete(rf.relays, p) // unprotect the connection rf.host.ConnManager().Unprotect(p, autorelayTag) + rf.relayMx.Unlock() + if exists { + rf.metricsTracer.ReservationEnded(1) + } return err } log.Debugw("refreshed relay slot reservation", "relay", p) rf.relays[p] = rsvp + rf.relayMx.Unlock() return nil } @@ -561,12 +671,32 @@ func (rf *relayFinder) usingRelay(p peer.ID) bool { return ok } +// addCandidates adds a candidate to the candidates set. Assumes caller holds candidateMx mutex +func (rf *relayFinder) addCandidate(cand *candidate) { + _, exists := rf.candidates[cand.ai.ID] + rf.candidates[cand.ai.ID] = cand + if !exists { + rf.metricsTracer.CandidateAdded(1) + } +} + +func (rf *relayFinder) removeCandidate(id peer.ID) { + _, exists := rf.candidates[id] + if exists { + delete(rf.candidates, id) + rf.metricsTracer.CandidateRemoved(1) + } +} + // selectCandidates returns an ordered slice of relay candidates. // Callers should attempt to obtain reservations with the candidates in this order. 
func (rf *relayFinder) selectCandidates() []*candidate { + now := rf.conf.clock.Now() candidates := make([]*candidate, 0, len(rf.candidates)) for _, cand := range rf.candidates { - candidates = append(candidates, cand) + if cand.added.Add(rf.conf.maxCandidateAge).After(now) { + candidates = append(candidates, cand) + } } // TODO: better relay selection strategy; this just selects random relays, @@ -602,9 +732,10 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { } // add relay specific addrs to the list + relayAddrCnt := 0 for p := range rf.relays { addrs := cleanupAddressSet(rf.host.Peerstore().Addrs(p)) - + relayAddrCnt += len(addrs) circuit := ma.StringCast(fmt.Sprintf("/p2p/%s/p2p-circuit", p.Pretty())) for _, addr := range addrs { pub := addr.Encapsulate(circuit) @@ -615,6 +746,7 @@ func (rf *relayFinder) relayAddrs(addrs []ma.Multiaddr) []ma.Multiaddr { rf.cachedAddrs = raddrs rf.cachedAddrsExpiry = rf.conf.clock.Now().Add(30 * time.Second) + rf.metricsTracer.RelayAddressCount(relayAddrCnt) return raddrs } @@ -622,9 +754,12 @@ func (rf *relayFinder) Start() error { rf.ctxCancelMx.Lock() defer rf.ctxCancelMx.Unlock() if rf.ctxCancel != nil { - return errors.New("relayFinder already running") + return errAlreadyRunning } log.Debug("starting relay finder") + + rf.initMetrics() + ctx, cancel := context.WithCancel(context.Background()) rf.ctxCancel = cancel rf.refCount.Add(1) @@ -644,5 +779,32 @@ func (rf *relayFinder) Stop() error { } rf.refCount.Wait() rf.ctxCancel = nil + + rf.resetMetrics() return nil } + +func (rf *relayFinder) initMetrics() { + rf.metricsTracer.DesiredReservations(rf.conf.desiredRelays) + + rf.relayMx.Lock() + rf.metricsTracer.ReservationOpened(len(rf.relays)) + rf.relayMx.Unlock() + + rf.candidateMx.Lock() + rf.metricsTracer.CandidateAdded(len(rf.candidates)) + rf.candidateMx.Unlock() +} + +func (rf *relayFinder) resetMetrics() { + rf.relayMx.Lock() + rf.metricsTracer.ReservationEnded(len(rf.relays)) + 
rf.relayMx.Unlock() + + rf.candidateMx.Lock() + rf.metricsTracer.CandidateRemoved(len(rf.candidates)) + rf.candidateMx.Unlock() + + rf.metricsTracer.RelayAddressCount(0) + rf.metricsTracer.ScheduledWorkUpdated(&scheduledWorkTimes{}) +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go deleted file mode 100644 index b5544553..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/autorelay/timer.go +++ /dev/null @@ -1,42 +0,0 @@ -package autorelay - -import ( - "time" - - "github.com/benbjohnson/clock" -) - -type timer struct { - timer *clock.Timer - running bool - read bool -} - -func newTimer(cl clock.Clock) *timer { - t := cl.Timer(100 * time.Hour) // There's no way to initialize a stopped timer - t.Stop() - return &timer{timer: t} -} - -func (t *timer) Chan() <-chan time.Time { - return t.timer.C -} - -func (t *timer) Stop() { - if !t.running { - return - } - if !t.timer.Stop() && !t.read { - <-t.timer.C - } - t.read = false -} - -func (t *timer) SetRead() { - t.read = true -} - -func (t *timer) Reset(d time.Duration) { - t.Stop() - t.timer.Reset(d) -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go index 82e29a37..70c40bf1 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/basic_host.go @@ -18,6 +18,7 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" + "github.com/libp2p/go-libp2p/core/transport" "github.com/libp2p/go-libp2p/p2p/host/autonat" "github.com/libp2p/go-libp2p/p2p/host/eventbus" "github.com/libp2p/go-libp2p/p2p/host/pstoremanager" @@ -27,11 +28,12 @@ import ( "github.com/libp2p/go-libp2p/p2p/protocol/holepunch" "github.com/libp2p/go-libp2p/p2p/protocol/identify" 
"github.com/libp2p/go-libp2p/p2p/protocol/ping" + libp2pwebtransport "github.com/libp2p/go-libp2p/p2p/transport/webtransport" + "github.com/prometheus/client_golang/prometheus" "github.com/libp2p/go-netroute" logging "github.com/ipfs/go-log/v2" - ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" manet "github.com/multiformats/go-multiaddr/net" @@ -70,7 +72,7 @@ type BasicHost struct { network network.Network psManager *pstoremanager.PeerstoreManager - mux *msmux.MultistreamMuxer + mux *msmux.MultistreamMuxer[protocol.ID] ids identify.IDService hps *holepunch.Service pings *ping.PingService @@ -107,8 +109,11 @@ var _ host.Host = (*BasicHost)(nil) // HostOpts holds options that can be passed to NewHost in order to // customize construction of the *BasicHost. type HostOpts struct { + // EventBus sets the event bus. Will construct a new event bus if omitted. + EventBus event.Bus + // MultistreamMuxer is essential for the *BasicHost and will use a sensible default value if omitted. - MultistreamMuxer *msmux.MultistreamMuxer + MultistreamMuxer *msmux.MultistreamMuxer[protocol.ID] // NegotiationTimeout determines the read and write timeouts on streams. // If 0 or omitted, it will use DefaultNegotiationTimeout. @@ -151,28 +156,36 @@ type HostOpts struct { EnableHolePunching bool // HolePunchingOptions are options for the hole punching service HolePunchingOptions []holepunch.Option + + // EnableMetrics enables the metrics subsystems + EnableMetrics bool + // PrometheusRegisterer is the PrometheusRegisterer used for metrics + PrometheusRegisterer prometheus.Registerer } // NewHost constructs a new *BasicHost and activates it by attaching its stream and connection handlers to the given inet.Network. 
func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { - eventBus := eventbus.NewBus() - psManager, err := pstoremanager.NewPeerstoreManager(n.Peerstore(), eventBus) + if opts == nil { + opts = &HostOpts{} + } + if opts.EventBus == nil { + opts.EventBus = eventbus.NewBus() + } + + psManager, err := pstoremanager.NewPeerstoreManager(n.Peerstore(), opts.EventBus) if err != nil { return nil, err } hostCtx, cancel := context.WithCancel(context.Background()) - if opts == nil { - opts = &HostOpts{} - } h := &BasicHost{ network: n, psManager: psManager, - mux: msmux.NewMultistreamMuxer(), + mux: msmux.NewMultistreamMuxer[protocol.ID](), negtimeout: DefaultNegotiationTimeout, AddrsFactory: DefaultAddrsFactory, maResolver: madns.DefaultResolver, - eventbus: eventBus, + eventbus: opts.EventBus, addrChangeChan: make(chan struct{}, 1), ctx: hostCtx, ctxCancel: cancel, @@ -181,17 +194,12 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { h.updateLocalIpAddr() - if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil { + if h.emitters.evtLocalProtocolsUpdated, err = h.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}, eventbus.Stateful); err != nil { return nil, err } if h.emitters.evtLocalAddrsUpdated, err = h.eventbus.Emitter(&event.EvtLocalAddressesUpdated{}, eventbus.Stateful); err != nil { return nil, err } - evtPeerConnectednessChanged, err := h.eventbus.Emitter(&event.EvtPeerConnectednessChanged{}) - if err != nil { - return nil, err - } - h.Network().Notify(newPeerConnectWatcher(evtPeerConnectednessChanged)) if !h.disableSignedPeerRecord { cab, ok := peerstore.GetCertifiedAddrBook(n.Peerstore()) @@ -223,21 +231,22 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { h.mux = opts.MultistreamMuxer } + idOpts := []identify.Option{ + identify.UserAgent(opts.UserAgent), + identify.ProtocolVersion(opts.ProtocolVersion), + } + // we can't set this as a default above 
because it depends on the *BasicHost. if h.disableSignedPeerRecord { - h.ids, err = identify.NewIDService( - h, - identify.UserAgent(opts.UserAgent), - identify.ProtocolVersion(opts.ProtocolVersion), - identify.DisableSignedPeerRecord(), - ) - } else { - h.ids, err = identify.NewIDService( - h, - identify.UserAgent(opts.UserAgent), - identify.ProtocolVersion(opts.ProtocolVersion), - ) + idOpts = append(idOpts, identify.DisableSignedPeerRecord()) } + if opts.EnableMetrics { + idOpts = append(idOpts, + identify.WithMetricsTracer( + identify.NewMetricsTracer(identify.WithRegisterer(opts.PrometheusRegisterer)))) + } + + h.ids, err = identify.NewIDService(h, idOpts...) if err != nil { return nil, fmt.Errorf("failed to create Identify service: %s", err) } @@ -273,6 +282,13 @@ func NewHost(n network.Network, opts *HostOpts) (*BasicHost, error) { } if opts.EnableRelayService { + if opts.EnableMetrics { + // Prefer explicitly provided metrics tracer + metricsOpt := []relayv2.Option{ + relayv2.WithMetricsTracer( + relayv2.NewMetricsTracer(relayv2.WithRegisterer(opts.PrometheusRegisterer)))} + opts.RelayServiceOpts = append(metricsOpt, opts.RelayServiceOpts...) + } h.relayManager = relaysvc.NewRelayManager(h, opts.RelayServiceOpts...) 
} @@ -367,6 +383,7 @@ func (h *BasicHost) updateLocalIpAddr() { func (h *BasicHost) Start() { h.psManager.Start() h.refCount.Add(1) + h.ids.Start() go h.background() } @@ -407,7 +424,7 @@ func (h *BasicHost) newStreamHandler(s network.Stream) { } } - if err := s.SetProtocol(protocol.ID(protoID)); err != nil { + if err := s.SetProtocol(protoID); err != nil { log.Debugf("error setting stream protocol: %s", err) s.Reset() return @@ -571,9 +588,9 @@ func (h *BasicHost) EventBus() event.Bus { // // (Threadsafe) func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) { - h.Mux().AddHandler(string(pid), func(p string, rwc io.ReadWriteCloser) error { + h.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error { is := rwc.(network.Stream) - is.SetProtocol(protocol.ID(p)) + is.SetProtocol(p) handler(is) return nil }) @@ -584,10 +601,10 @@ func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHand // SetStreamHandlerMatch sets the protocol handler on the Host's Mux // using a matching function to do protocol comparisons -func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler network.StreamHandler) { - h.Mux().AddHandlerWithFunc(string(pid), m, func(p string, rwc io.ReadWriteCloser) error { +func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) { + h.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error { is := rwc.(network.Stream) - is.SetProtocol(protocol.ID(p)) + is.SetProtocol(p) handler(is) return nil }) @@ -598,7 +615,7 @@ func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, // RemoveStreamHandler returns .. 
func (h *BasicHost) RemoveStreamHandler(pid protocol.ID) { - h.Mux().RemoveHandler(string(pid)) + h.Mux().RemoveHandler(pid) h.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{ Removed: []protocol.ID{pid}, }) @@ -620,8 +637,13 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I } } - s, err := h.Network().NewStream(ctx, p) + s, err := h.Network().NewStream(network.WithNoDial(ctx, "already dialed"), p) if err != nil { + // TODO: It would be nicer to get the actual error from the swarm, + // but this will require some more work. + if errors.Is(err, network.ErrNoConn) { + return nil, errors.New("connection failed") + } return nil, err } @@ -637,9 +659,7 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I return nil, ctx.Err() } - pidStrings := protocol.ConvertToStrings(pids) - - pref, err := h.preferredProtocol(p, pidStrings) + pref, err := h.preferredProtocol(p, pids) if err != nil { _ = s.Reset() return nil, err @@ -647,7 +667,7 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I if pref != "" { s.SetProtocol(pref) - lzcon := msmux.NewMSSelect(s, string(pref)) + lzcon := msmux.NewMSSelect(s, pref) return &streamWrapper{ Stream: s, rw: lzcon, @@ -655,10 +675,10 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I } // Negotiate the protocol in the background, obeying the context. 
- var selected string + var selected protocol.ID errCh := make(chan error, 1) go func() { - selected, err = msmux.SelectOneOf(pidStrings, s) + selected, err = msmux.SelectOneOf(pids, s) errCh <- err }() select { @@ -674,13 +694,12 @@ func (h *BasicHost) NewStream(ctx context.Context, p peer.ID, pids ...protocol.I return nil, ctx.Err() } - selpid := protocol.ID(selected) - s.SetProtocol(selpid) + s.SetProtocol(selected) h.Peerstore().AddProtocols(p, selected) return s, nil } -func (h *BasicHost) preferredProtocol(p peer.ID, pids []string) (protocol.ID, error) { +func (h *BasicHost) preferredProtocol(p peer.ID, pids []protocol.ID) (protocol.ID, error) { supported, err := h.Peerstore().SupportsProtocols(p, pids...) if err != nil { return "", err @@ -688,7 +707,7 @@ func (h *BasicHost) preferredProtocol(p peer.ID, pids []string) (protocol.ID, er var out protocol.ID if len(supported) > 0 { - out = protocol.ID(supported[0]) + out = supported[0] } return out, nil } @@ -743,21 +762,56 @@ func (h *BasicHost) ConnManager() connmgr.ConnManager { // Addrs returns listening addresses that are safe to announce to the network. // The output is the same as AllAddrs, but processed by AddrsFactory. func (h *BasicHost) Addrs() []ma.Multiaddr { - return h.AddrsFactory(h.AllAddrs()) + // This is a temporary workaround/hack that fixes #2233. Once we have a + // proper address pipeline, rework this. See the issue for more context. + type transportForListeninger interface { + TransportForListening(a ma.Multiaddr) transport.Transport + } + + type addCertHasher interface { + AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool) + } + + addrs := h.AddrsFactory(h.AllAddrs()) + + s, ok := h.Network().(transportForListeninger) + if !ok { + return addrs + } + + // Copy addrs slice since we'll be modifying it. 
+ addrsOld := addrs + addrs = make([]ma.Multiaddr, len(addrsOld)) + copy(addrs, addrsOld) + + for i, addr := range addrs { + if ok, n := libp2pwebtransport.IsWebtransportMultiaddr(addr); ok && n == 0 { + t := s.TransportForListening(addr) + tpt, ok := t.(addCertHasher) + if !ok { + continue + } + addrWithCerthash, added := tpt.AddCertHashes(addr) + addrs[i] = addrWithCerthash + if !added { + log.Debug("Couldn't add certhashes to webtransport multiaddr because we aren't listening on webtransport") + } + } + } + return addrs } -// mergeAddrs merges input address lists, leave only unique addresses -func dedupAddrs(addrs []ma.Multiaddr) (uniqueAddrs []ma.Multiaddr) { - exists := make(map[string]bool) - for _, addr := range addrs { - k := string(addr.Bytes()) - if exists[k] { - continue +// NormalizeMultiaddr returns a multiaddr suitable for equality checks. +// If the multiaddr is a webtransport component, it removes the certhashes. +func (h *BasicHost) NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr { + if ok, n := libp2pwebtransport.IsWebtransportMultiaddr(addr); ok && n > 0 { + out := addr + for i := 0; i < n; i++ { + out, _ = ma.SplitLast(out) } - exists[k] = true - uniqueAddrs = append(uniqueAddrs, addr) + return out } - return uniqueAddrs + return addr } // AllAddrs returns all the addresses of BasicHost at this moment in time. @@ -771,7 +825,6 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { h.addrMu.RLock() filteredIfaceAddrs := h.filteredInterfaceAddrs allIfaceAddrs := h.allInterfaceAddrs - autonat := h.autoNat h.addrMu.RUnlock() // Iterate over all _unresolved_ listen addresses, resolving our primary @@ -785,20 +838,7 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { finalAddrs = append(finalAddrs, resolved...) 
} - // add autonat PublicAddr Consider the following scenario - // For example, it is deployed on a cloud server, - // it provides an elastic ip accessible to the public network, - // but not have an external network card, - // so net.InterfaceAddrs() not has the public ip - // The host can indeed be dialed !!! - if autonat != nil { - publicAddr, _ := autonat.PublicAddr() - if publicAddr != nil { - finalAddrs = append(finalAddrs, publicAddr) - } - } - - finalAddrs = dedupAddrs(finalAddrs) + finalAddrs = network.DedupAddrs(finalAddrs) var natMappings []inat.Mapping @@ -948,8 +988,83 @@ func (h *BasicHost) AllAddrs() []ma.Multiaddr { } finalAddrs = append(finalAddrs, observedAddrs...) } + finalAddrs = network.DedupAddrs(finalAddrs) + finalAddrs = inferWebtransportAddrsFromQuic(finalAddrs) + + return finalAddrs +} + +var wtComponent = ma.StringCast("/webtransport") + +// inferWebtransportAddrsFromQuic infers more webtransport addresses from QUIC addresses. +// This is useful when we discover our public QUIC address, but haven't discovered our public WebTransport addrs. +// If we see that we are listening on the same port for QUIC and WebTransport, +// we can be pretty sure that the WebTransport addr will be reachable if the +// QUIC one is. +// We assume the input is deduped. +func inferWebtransportAddrsFromQuic(in []ma.Multiaddr) []ma.Multiaddr { + // We need to check if we are listening on the same ip+port for QUIC and WebTransport. + // If not, there's nothing to do since we can't infer anything. + + // Count the number of QUIC addrs, this will let us allocate just once at the beginning. 
+ quicAddrCount := 0 + for _, addr := range in { + if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { + quicAddrCount++ + } + } + quicOrWebtransportAddrs := make(map[string]struct{}, quicAddrCount) + webtransportAddrs := make(map[string]struct{}, quicAddrCount) + foundSameListeningAddr := false + for _, addr := range in { + isWebtransport, numCertHashes := libp2pwebtransport.IsWebtransportMultiaddr(addr) + if isWebtransport { + for i := 0; i < numCertHashes; i++ { + // Remove certhashes + addr, _ = ma.SplitLast(addr) + } + webtransportAddrs[addr.String()] = struct{}{} + // Remove webtransport component, now it's a multiaddr that ends in /quic-v1 + addr, _ = ma.SplitLast(addr) + } + + if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { + addrStr := addr.String() + if _, ok := quicOrWebtransportAddrs[addrStr]; ok { + foundSameListeningAddr = true + } else { + quicOrWebtransportAddrs[addrStr] = struct{}{} + } + } + } + + if !foundSameListeningAddr { + return in + } + + if len(webtransportAddrs) == 0 { + // No webtransport addresses, we aren't listening on any webtransport + // address, so we shouldn't add any. + return in + } + + out := make([]ma.Multiaddr, 0, len(in)+(quicAddrCount-len(webtransportAddrs))) + for _, addr := range in { + // Add all the original addresses + out = append(out, addr) + if _, lastComponent := ma.SplitLast(addr); lastComponent.Protocol().Code == ma.P_QUIC_V1 { + // Convert quic to webtransport + addr = addr.Encapsulate(wtComponent) + if _, ok := webtransportAddrs[addr.String()]; ok { + // We already have this address + continue + } + // Add the new inferred address + out = append(out, addr) + } + } - return dedupAddrs(finalAddrs) + return out } // SetAutoNat sets the autonat service for the host. 
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go deleted file mode 100644 index bfc46ed8..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/basic/peer_connectedness.go +++ /dev/null @@ -1,71 +0,0 @@ -package basichost - -import ( - "sync" - - "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - - ma "github.com/multiformats/go-multiaddr" -) - -type peerConnectWatcher struct { - emitter event.Emitter - - mutex sync.Mutex - connected map[peer.ID]struct{} -} - -var _ network.Notifiee = &peerConnectWatcher{} - -func newPeerConnectWatcher(emitter event.Emitter) *peerConnectWatcher { - return &peerConnectWatcher{ - emitter: emitter, - connected: make(map[peer.ID]struct{}), - } -} - -func (w *peerConnectWatcher) Listen(network.Network, ma.Multiaddr) {} -func (w *peerConnectWatcher) ListenClose(network.Network, ma.Multiaddr) {} - -func (w *peerConnectWatcher) Connected(n network.Network, conn network.Conn) { - p := conn.RemotePeer() - w.handleTransition(p, n.Connectedness(p)) -} - -func (w *peerConnectWatcher) Disconnected(n network.Network, conn network.Conn) { - p := conn.RemotePeer() - w.handleTransition(p, n.Connectedness(p)) -} - -func (w *peerConnectWatcher) handleTransition(p peer.ID, state network.Connectedness) { - if changed := w.checkTransition(p, state); !changed { - return - } - w.emitter.Emit(event.EvtPeerConnectednessChanged{ - Peer: p, - Connectedness: state, - }) -} - -func (w *peerConnectWatcher) checkTransition(p peer.ID, state network.Connectedness) bool { - w.mutex.Lock() - defer w.mutex.Unlock() - switch state { - case network.Connected: - if _, ok := w.connected[p]; ok { - return false - } - w.connected[p] = struct{}{} - return true - case network.NotConnected: - if _, ok := w.connected[p]; ok { - delete(w.connected, p) - return true - } - return false - 
default: - return false - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go index 16753eb0..9f3daeff 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/blank.go @@ -27,7 +27,7 @@ var log = logging.Logger("blankhost") // BlankHost is the thinnest implementation of the host.Host interface type BlankHost struct { n network.Network - mux *mstream.MultistreamMuxer + mux *mstream.MultistreamMuxer[protocol.ID] cmgr connmgr.ConnManager eventbus event.Bus emitters struct { @@ -65,10 +65,10 @@ func NewBlankHost(n network.Network, options ...Option) *BlankHost { bh := &BlankHost{ n: n, cmgr: cfg.cmgr, - mux: mstream.NewMultistreamMuxer(), + mux: mstream.NewMultistreamMuxer[protocol.ID](), } if bh.eventbus == nil { - bh.eventbus = eventbus.NewBus() + bh.eventbus = eventbus.NewBus(eventbus.WithMetricsTracer(eventbus.NewMetricsTracer())) } // subscribe the connection manager to network notifications (has no effect with NullConnMgr) @@ -78,11 +78,6 @@ func NewBlankHost(n network.Network, options ...Option) *BlankHost { if bh.emitters.evtLocalProtocolsUpdated, err = bh.eventbus.Emitter(&event.EvtLocalProtocolsUpdated{}); err != nil { return nil } - evtPeerConnectednessChanged, err := bh.eventbus.Emitter(&event.EvtPeerConnectednessChanged{}) - if err != nil { - return nil - } - n.Notify(newPeerConnectWatcher(evtPeerConnectednessChanged)) n.SetStreamHandler(bh.newStreamHandler) @@ -158,35 +153,29 @@ func (bh *BlankHost) NewStream(ctx context.Context, p peer.ID, protos ...protoco return nil, err } - protoStrs := make([]string, len(protos)) - for i, pid := range protos { - protoStrs[i] = string(pid) - } - - selected, err := mstream.SelectOneOf(protoStrs, s) + selected, err := mstream.SelectOneOf(protos, s) if err != nil { s.Reset() return nil, err } - selpid := protocol.ID(selected) - s.SetProtocol(selpid) + 
s.SetProtocol(selected) bh.Peerstore().AddProtocols(p, selected) return s, nil } func (bh *BlankHost) RemoveStreamHandler(pid protocol.ID) { - bh.Mux().RemoveHandler(string(pid)) + bh.Mux().RemoveHandler(pid) bh.emitters.evtLocalProtocolsUpdated.Emit(event.EvtLocalProtocolsUpdated{ Removed: []protocol.ID{pid}, }) } func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) { - bh.Mux().AddHandler(string(pid), func(p string, rwc io.ReadWriteCloser) error { + bh.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error { is := rwc.(network.Stream) - is.SetProtocol(protocol.ID(p)) + is.SetProtocol(p) handler(is) return nil }) @@ -195,10 +184,10 @@ func (bh *BlankHost) SetStreamHandler(pid protocol.ID, handler network.StreamHan }) } -func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler network.StreamHandler) { - bh.Mux().AddHandlerWithFunc(string(pid), m, func(p string, rwc io.ReadWriteCloser) error { +func (bh *BlankHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) { + bh.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error { is := rwc.(network.Stream) - is.SetProtocol(protocol.ID(p)) + is.SetProtocol(p) handler(is) return nil }) @@ -216,7 +205,7 @@ func (bh *BlankHost) newStreamHandler(s network.Stream) { return } - s.SetProtocol(protocol.ID(protoID)) + s.SetProtocol(protoID) go handle(protoID, s) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go deleted file mode 100644 index 4f70540f..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/blank/peer_connectedness.go +++ /dev/null @@ -1,71 +0,0 @@ -package blankhost - -import ( - "sync" - - "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - - ma 
"github.com/multiformats/go-multiaddr" -) - -type peerConnectWatcher struct { - emitter event.Emitter - - mutex sync.Mutex - connected map[peer.ID]struct{} -} - -var _ network.Notifiee = &peerConnectWatcher{} - -func newPeerConnectWatcher(emitter event.Emitter) *peerConnectWatcher { - return &peerConnectWatcher{ - emitter: emitter, - connected: make(map[peer.ID]struct{}), - } -} - -func (w *peerConnectWatcher) Listen(network.Network, ma.Multiaddr) {} -func (w *peerConnectWatcher) ListenClose(network.Network, ma.Multiaddr) {} - -func (w *peerConnectWatcher) Connected(n network.Network, conn network.Conn) { - p := conn.RemotePeer() - w.handleTransition(p, n.Connectedness(p)) -} - -func (w *peerConnectWatcher) Disconnected(n network.Network, conn network.Conn) { - p := conn.RemotePeer() - w.handleTransition(p, n.Connectedness(p)) -} - -func (w *peerConnectWatcher) handleTransition(p peer.ID, state network.Connectedness) { - if changed := w.checkTransition(p, state); !changed { - return - } - w.emitter.Emit(event.EvtPeerConnectednessChanged{ - Peer: p, - Connectedness: state, - }) -} - -func (w *peerConnectWatcher) checkTransition(p peer.ID, state network.Connectedness) bool { - w.mutex.Lock() - defer w.mutex.Unlock() - switch state { - case network.Connected: - if _, ok := w.connected[p]; ok { - return false - } - w.connected[p] = struct{}{} - return true - case network.NotConnected: - if _, ok := w.connected[p]; ok { - delete(w.connected, p) - return true - } - return false - default: - return false - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE deleted file mode 100644 index 14478a3b..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-APACHE +++ /dev/null @@ -1,5 +0,0 @@ -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT deleted file mode 100644 index 72dc60d8..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/LICENSE-MIT +++ /dev/null @@ -1,19 +0,0 @@ -The MIT License (MIT) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go index 6ab6c410..42365a79 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic.go @@ -15,46 +15,56 @@ import ( // basicBus is a type-based event delivery system type basicBus struct { - lk sync.RWMutex - nodes map[reflect.Type]*node - wildcard *wildcardNode + lk sync.RWMutex + nodes map[reflect.Type]*node + wildcard *wildcardNode + metricsTracer MetricsTracer } var _ event.Bus = (*basicBus)(nil) type emitter struct { - n *node - w *wildcardNode - typ reflect.Type - closed int32 - dropper func(reflect.Type) + n *node + w *wildcardNode + typ reflect.Type + closed atomic.Bool + dropper func(reflect.Type) + metricsTracer MetricsTracer } func (e *emitter) Emit(evt interface{}) error { - if atomic.LoadInt32(&e.closed) != 0 { + if e.closed.Load() { return fmt.Errorf("emitter is closed") } + e.n.emit(evt) e.w.emit(evt) + if e.metricsTracer != nil { + e.metricsTracer.EventEmitted(e.typ) + } return nil } func (e *emitter) Close() error { - if !atomic.CompareAndSwapInt32(&e.closed, 0, 1) { + if !e.closed.CompareAndSwap(false, true) { return fmt.Errorf("closed an emitter more than once") } - if atomic.AddInt32(&e.n.nEmitters, -1) == 0 { + if e.n.nEmitters.Add(-1) == 0 { e.dropper(e.typ) } return nil } -func NewBus() event.Bus { - return &basicBus{ +func NewBus(opts ...Option) event.Bus { + bus := &basicBus{ nodes: map[reflect.Type]*node{}, - wildcard: new(wildcardNode), + wildcard: &wildcardNode{}, + } + for _, opt := range opts { + opt(bus) } + return bus } func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node)) { @@ -62,7 +72,7 @@ func (b *basicBus) withNode(typ reflect.Type, cb func(*node), async func(*node)) n, ok := b.nodes[typ] if !ok { - n = newNode(typ) + n = newNode(typ, b.metricsTracer) b.nodes[typ] = n } @@ -90,7 +100,7 
@@ func (b *basicBus) tryDropNode(typ reflect.Type) { } n.lk.Lock() - if atomic.LoadInt32(&n.nEmitters) > 0 || len(n.sinks) > 0 { + if n.nEmitters.Load() > 0 || len(n.sinks) > 0 { n.lk.Unlock() b.lk.Unlock() return // still in use @@ -102,8 +112,10 @@ func (b *basicBus) tryDropNode(typ reflect.Type) { } type wildcardSub struct { - ch chan interface{} - w *wildcardNode + ch chan interface{} + w *wildcardNode + metricsTracer MetricsTracer + name string } func (w *wildcardSub) Out() <-chan interface{} { @@ -112,13 +124,31 @@ func (w *wildcardSub) Out() <-chan interface{} { func (w *wildcardSub) Close() error { w.w.removeSink(w.ch) + if w.metricsTracer != nil { + w.metricsTracer.RemoveSubscriber(reflect.TypeOf(event.WildcardSubscription)) + } return nil } +func (w *wildcardSub) Name() string { + return w.name +} + +type namedSink struct { + name string + ch chan interface{} +} + type sub struct { - ch chan interface{} - nodes []*node - dropper func(reflect.Type) + ch chan interface{} + nodes []*node + dropper func(reflect.Type) + metricsTracer MetricsTracer + name string +} + +func (s *sub) Name() string { + return s.name } func (s *sub) Out() <-chan interface{} { @@ -137,14 +167,18 @@ func (s *sub) Close() error { n.lk.Lock() for i := 0; i < len(n.sinks); i++ { - if n.sinks[i] == s.ch { + if n.sinks[i].ch == s.ch { n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil n.sinks = n.sinks[:len(n.sinks)-1] + + if s.metricsTracer != nil { + s.metricsTracer.RemoveSubscriber(n.typ) + } break } } - tryDrop := len(n.sinks) == 0 && atomic.LoadInt32(&n.nEmitters) == 0 + tryDrop := len(n.sinks) == 0 && n.nEmitters.Load() == 0 n.lk.Unlock() @@ -162,7 +196,7 @@ var _ event.Subscription = (*sub)(nil) // publishers to get blocked. 
CancelFunc is guaranteed to return after last send // to the channel func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt) (_ event.Subscription, err error) { - settings := subSettingsDefault + settings := newSubSettings() for _, opt := range opts { if err := opt(&settings); err != nil { return nil, err @@ -171,10 +205,12 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt if evtTypes == event.WildcardSubscription { out := &wildcardSub{ - ch: make(chan interface{}, settings.buffer), - w: b.wildcard, + ch: make(chan interface{}, settings.buffer), + w: b.wildcard, + metricsTracer: b.metricsTracer, + name: settings.name, } - b.wildcard.addSink(out.ch) + b.wildcard.addSink(&namedSink{ch: out.ch, name: out.name}) return out, nil } @@ -195,7 +231,9 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt ch: make(chan interface{}, settings.buffer), nodes: make([]*node, len(types)), - dropper: b.tryDropNode, + dropper: b.tryDropNode, + metricsTracer: b.metricsTracer, + name: settings.name, } for _, etyp := range types { @@ -208,8 +246,11 @@ func (b *basicBus) Subscribe(evtTypes interface{}, opts ...event.SubscriptionOpt typ := reflect.TypeOf(etyp) b.withNode(typ.Elem(), func(n *node) { - n.sinks = append(n.sinks, out.ch) + n.sinks = append(n.sinks, &namedSink{ch: out.ch, name: out.name}) out.nodes[i] = n + if b.metricsTracer != nil { + b.metricsTracer.AddSubscriber(typ.Elem()) + } }, func(n *node) { if n.keepLast { l := n.last @@ -253,9 +294,9 @@ func (b *basicBus) Emitter(evtType interface{}, opts ...event.EmitterOpt) (e eve typ = typ.Elem() b.withNode(typ, func(n *node) { - atomic.AddInt32(&n.nEmitters, 1) + n.nEmitters.Add(1) n.keepLast = n.keepLast || settings.makeStateful - e = &emitter{n: n, typ: typ, dropper: b.tryDropNode, w: b.wildcard} + e = &emitter{n: n, typ: typ, dropper: b.tryDropNode, w: b.wildcard, metricsTracer: b.metricsTracer} }, nil) return } @@ -278,22 +319,27 @@ 
func (b *basicBus) GetAllEventTypes() []reflect.Type { type wildcardNode struct { sync.RWMutex - nSinks int32 - sinks []chan interface{} + nSinks atomic.Int32 + sinks []*namedSink + metricsTracer MetricsTracer } -func (n *wildcardNode) addSink(ch chan interface{}) { - atomic.AddInt32(&n.nSinks, 1) // ok to do outside the lock +func (n *wildcardNode) addSink(sink *namedSink) { + n.nSinks.Add(1) // ok to do outside the lock n.Lock() - n.sinks = append(n.sinks, ch) + n.sinks = append(n.sinks, sink) n.Unlock() + + if n.metricsTracer != nil { + n.metricsTracer.AddSubscriber(reflect.TypeOf(event.WildcardSubscription)) + } } func (n *wildcardNode) removeSink(ch chan interface{}) { - atomic.AddInt32(&n.nSinks, -1) // ok to do outside the lock + n.nSinks.Add(-1) // ok to do outside the lock n.Lock() for i := 0; i < len(n.sinks); i++ { - if n.sinks[i] == ch { + if n.sinks[i].ch == ch { n.sinks[i], n.sinks[len(n.sinks)-1] = n.sinks[len(n.sinks)-1], nil n.sinks = n.sinks[:len(n.sinks)-1] break @@ -303,13 +349,18 @@ func (n *wildcardNode) removeSink(ch chan interface{}) { } func (n *wildcardNode) emit(evt interface{}) { - if atomic.LoadInt32(&n.nSinks) == 0 { + if n.nSinks.Load() == 0 { return } n.RLock() - for _, ch := range n.sinks { - ch <- evt + for _, sink := range n.sinks { + + // Sending metrics before sending on channel allows us to + // record channel full events before blocking + sendSubscriberMetrics(n.metricsTracer, sink) + + sink.ch <- evt } n.RUnlock() } @@ -321,17 +372,19 @@ type node struct { typ reflect.Type // emitter ref count - nEmitters int32 + nEmitters atomic.Int32 keepLast bool last interface{} - sinks []chan interface{} + sinks []*namedSink + metricsTracer MetricsTracer } -func newNode(typ reflect.Type) *node { +func newNode(typ reflect.Type, metricsTracer MetricsTracer) *node { return &node{ - typ: typ, + typ: typ, + metricsTracer: metricsTracer, } } @@ -346,8 +399,20 @@ func (n *node) emit(evt interface{}) { n.last = evt } - for _, ch := range n.sinks 
{ - ch <- evt + for _, sink := range n.sinks { + + // Sending metrics before sending on channel allows us to + // record channel full events before blocking + sendSubscriberMetrics(n.metricsTracer, sink) + sink.ch <- evt } n.lk.Unlock() } + +func sendSubscriberMetrics(metricsTracer MetricsTracer, sink *namedSink) { + if metricsTracer != nil { + metricsTracer.SubscriberQueueLength(sink.name, len(sink.ch)+1) + metricsTracer.SubscriberQueueFull(sink.name, len(sink.ch)+1 >= cap(sink.ch)) + metricsTracer.SubscriberEventQueued(sink.name) + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go new file mode 100644 index 00000000..8e7b1e88 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/basic_metrics.go @@ -0,0 +1,164 @@ +package eventbus + +import ( + "reflect" + "strings" + + "github.com/libp2p/go-libp2p/p2p/metricshelper" + + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "libp2p_eventbus" + +var ( + eventsEmitted = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "events_emitted_total", + Help: "Events Emitted", + }, + []string{"event"}, + ) + totalSubscribers = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "subscribers_total", + Help: "Number of subscribers for an event type", + }, + []string{"event"}, + ) + subscriberQueueLength = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "subscriber_queue_length", + Help: "Subscriber queue length", + }, + []string{"subscriber_name"}, + ) + subscriberQueueFull = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "subscriber_queue_full", + Help: "Subscriber Queue completely full", + }, + []string{"subscriber_name"}, + ) + subscriberEventQueued = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: 
metricNamespace, + Name: "subscriber_event_queued", + Help: "Event Queued for subscriber", + }, + []string{"subscriber_name"}, + ) + collectors = []prometheus.Collector{ + eventsEmitted, + totalSubscribers, + subscriberQueueLength, + subscriberQueueFull, + subscriberEventQueued, + } +) + +// MetricsTracer tracks metrics for the eventbus subsystem +type MetricsTracer interface { + + // EventEmitted counts the total number of events grouped by event type + EventEmitted(typ reflect.Type) + + // AddSubscriber adds a subscriber for the event type + AddSubscriber(typ reflect.Type) + + // RemoveSubscriber removes a subscriber for the event type + RemoveSubscriber(typ reflect.Type) + + // SubscriberQueueLength is the length of the subscribers channel + SubscriberQueueLength(name string, n int) + + // SubscriberQueueFull tracks whether a subscribers channel if full + SubscriberQueueFull(name string, isFull bool) + + // SubscriberEventQueued counts the total number of events grouped by subscriber + SubscriberEventQueued(name string) +} + +type metricsTracer struct{} + +var _ MetricsTracer = &metricsTracer{} + +type metricsTracerSetting struct { + reg prometheus.Registerer +} + +type MetricsTracerOption func(*metricsTracerSetting) + +func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption { + return func(s *metricsTracerSetting) { + if reg != nil { + s.reg = reg + } + } +} + +func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { + setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer} + for _, opt := range opts { + opt(setting) + } + metricshelper.RegisterCollectors(setting.reg, collectors...) 
+ return &metricsTracer{} +} + +func (m *metricsTracer) EventEmitted(typ reflect.Type) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, strings.TrimPrefix(typ.String(), "event.")) + eventsEmitted.WithLabelValues(*tags...).Inc() +} + +func (m *metricsTracer) AddSubscriber(typ reflect.Type) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, strings.TrimPrefix(typ.String(), "event.")) + totalSubscribers.WithLabelValues(*tags...).Inc() +} + +func (m *metricsTracer) RemoveSubscriber(typ reflect.Type) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, strings.TrimPrefix(typ.String(), "event.")) + totalSubscribers.WithLabelValues(*tags...).Dec() +} + +func (m *metricsTracer) SubscriberQueueLength(name string, n int) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, name) + subscriberQueueLength.WithLabelValues(*tags...).Set(float64(n)) +} + +func (m *metricsTracer) SubscriberQueueFull(name string, isFull bool) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, name) + observer := subscriberQueueFull.WithLabelValues(*tags...) 
+ if isFull { + observer.Set(1) + } else { + observer.Set(0) + } +} + +func (m *metricsTracer) SubscriberEventQueued(name string) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, name) + subscriberEventQueued.WithLabelValues(*tags...).Inc() +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go index a8eae6f2..837a0683 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/eventbus/opts.go @@ -1,13 +1,44 @@ package eventbus +import ( + "fmt" + "runtime" + "strings" + "sync/atomic" +) + type subSettings struct { buffer int + name string } +var subCnt atomic.Int64 + var subSettingsDefault = subSettings{ buffer: 16, } +// newSubSettings returns the settings for a new subscriber +// The default naming strategy is sub--L +func newSubSettings() subSettings { + settings := subSettingsDefault + _, file, line, ok := runtime.Caller(2) // skip=1 is eventbus.Subscriber + if ok { + file = strings.TrimPrefix(file, "github.com/") + // remove the version number from the path, for example + // go-libp2p-package@v0.x.y-some-hash-123/file.go will be shortened go go-libp2p-package/file.go + if idx1 := strings.Index(file, "@"); idx1 != -1 { + if idx2 := strings.Index(file[idx1:], "/"); idx2 != -1 { + file = file[:idx1] + file[idx1+idx2:] + } + } + settings.name = fmt.Sprintf("%s-L%d", file, line) + } else { + settings.name = fmt.Sprintf("subscriber-%d", subCnt.Add(1)) + } + return settings +} + func BufSize(n int) func(interface{}) error { return func(s interface{}) error { s.(*subSettings).buffer = n @@ -15,6 +46,13 @@ func BufSize(n int) func(interface{}) error { } } +func Name(name string) func(interface{}) error { + return func(s interface{}) error { + s.(*subSettings).name = name + return nil + } +} + type emitterSettings struct { makeStateful bool } @@ -30,3 +68,12 
@@ func Stateful(s interface{}) error { s.(*emitterSettings).makeStateful = true return nil } + +type Option func(*basicBus) + +func WithMetricsTracer(metricsTracer MetricsTracer) Option { + return func(bus *basicBus) { + bus.metricsTracer = metricsTracer + bus.wildcard.metricsTracer = metricsTracer + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go index 28aa6d4d..67f9f914 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/addr_book.go @@ -46,7 +46,10 @@ type addrSegment struct { } func (segments *addrSegments) get(p peer.ID) *addrSegment { - return segments[byte(p[len(p)-1])] + if len(p) == 0 { // it's not terribly useful to use an empty peer ID, but at least we should not panic + return segments[0] + } + return segments[uint8(p[len(p)-1])] } type clock interface { @@ -235,11 +238,16 @@ func (mab *memoryAddrBook) addAddrsUnlocked(s *addrSegment, p peer.ID, addrs []m exp := mab.clock.Now().Add(ttl) for _, addr := range addrs { + // Remove suffix of /p2p/peer-id from address + addr, addrPid := peer.SplitAddr(addr) if addr == nil { - log.Warnw("was passed nil multiaddr", "peer", p) + log.Warnw("Was passed nil multiaddr", "peer", p) + continue + } + if addrPid != "" && addrPid != p { + log.Warnf("Was passed p2p address with a different peerId. found: %s, expected: %s", addrPid, p) continue } - // find the highest TTL and Expiry time between // existing records and function args a, found := amap[string(addr.Bytes())] // won't allocate. 
@@ -280,10 +288,15 @@ func (mab *memoryAddrBook) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Du exp := mab.clock.Now().Add(ttl) for _, addr := range addrs { + addr, addrPid := peer.SplitAddr(addr) if addr == nil { log.Warnw("was passed nil multiaddr", "peer", p) continue } + if addrPid != "" && addrPid != p { + log.Warnf("was passed p2p address with a different peerId, found: %s wanted: %s", addrPid, p) + continue + } aBytes := addr.Bytes() key := string(aBytes) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go index 7a955c07..0000f97f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem/protobook.go @@ -6,11 +6,12 @@ import ( "github.com/libp2p/go-libp2p/core/peer" pstore "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" ) type protoSegment struct { sync.RWMutex - protocols map[peer.ID]map[string]struct{} + protocols map[peer.ID]map[protocol.ID]struct{} } type protoSegments [256]*protoSegment @@ -27,7 +28,7 @@ type memoryProtoBook struct { maxProtos int lk sync.RWMutex - interned map[string]string + interned map[protocol.ID]protocol.ID } var _ pstore.ProtoBook = (*memoryProtoBook)(nil) @@ -43,11 +44,11 @@ func WithMaxProtocols(num int) ProtoBookOption { func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) { pb := &memoryProtoBook{ - interned: make(map[string]string, 256), + interned: make(map[protocol.ID]protocol.ID, 256), segments: func() (ret protoSegments) { for i := range ret { ret[i] = &protoSegment{ - protocols: make(map[peer.ID]map[string]struct{}), + protocols: make(map[peer.ID]map[protocol.ID]struct{}), } } return ret @@ -63,7 +64,7 @@ func NewProtoBook(opts ...ProtoBookOption) (*memoryProtoBook, error) { return pb, nil } -func (pb *memoryProtoBook) internProtocol(proto 
string) string { +func (pb *memoryProtoBook) internProtocol(proto protocol.ID) protocol.ID { // check if it is interned with the read lock pb.lk.RLock() interned, ok := pb.interned[proto] @@ -87,12 +88,12 @@ func (pb *memoryProtoBook) internProtocol(proto string) string { return proto } -func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...string) error { +func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...protocol.ID) error { if len(protos) > pb.maxProtos { return errTooManyProtocols } - newprotos := make(map[string]struct{}, len(protos)) + newprotos := make(map[protocol.ID]struct{}, len(protos)) for _, proto := range protos { newprotos[pb.internProtocol(proto)] = struct{}{} } @@ -105,14 +106,14 @@ func (pb *memoryProtoBook) SetProtocols(p peer.ID, protos ...string) error { return nil } -func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...string) error { +func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...protocol.ID) error { s := pb.segments.get(p) s.Lock() defer s.Unlock() protomap, ok := s.protocols[p] if !ok { - protomap = make(map[string]struct{}) + protomap = make(map[protocol.ID]struct{}) s.protocols[p] = protomap } if len(protomap)+len(protos) > pb.maxProtos { @@ -125,12 +126,12 @@ func (pb *memoryProtoBook) AddProtocols(p peer.ID, protos ...string) error { return nil } -func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]string, error) { +func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]protocol.ID, error) { s := pb.segments.get(p) s.RLock() defer s.RUnlock() - out := make([]string, 0, len(s.protocols[p])) + out := make([]protocol.ID, 0, len(s.protocols[p])) for k := range s.protocols[p] { out = append(out, k) } @@ -138,7 +139,7 @@ func (pb *memoryProtoBook) GetProtocols(p peer.ID) ([]string, error) { return out, nil } -func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...string) error { +func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...protocol.ID) error { s := pb.segments.get(p) 
s.Lock() defer s.Unlock() @@ -155,12 +156,12 @@ func (pb *memoryProtoBook) RemoveProtocols(p peer.ID, protos ...string) error { return nil } -func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...string) ([]string, error) { +func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...protocol.ID) ([]protocol.ID, error) { s := pb.segments.get(p) s.RLock() defer s.RUnlock() - out := make([]string, 0, len(protos)) + out := make([]protocol.ID, 0, len(protos)) for _, proto := range protos { if _, ok := s.protocols[p][proto]; ok { out = append(out, proto) @@ -170,7 +171,7 @@ func (pb *memoryProtoBook) SupportsProtocols(p peer.ID, protos ...string) ([]str return out, nil } -func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...string) (string, error) { +func (pb *memoryProtoBook) FirstSupportedProtocol(p peer.ID, protos ...protocol.ID) (protocol.ID, error) { s := pb.segments.get(p) s.RLock() defer s.RUnlock() diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go index f8382c70..d9550f49 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/pstoremanager/pstoremanager.go @@ -9,6 +9,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" logging "github.com/ipfs/go-log/v2" ) @@ -68,7 +69,7 @@ func NewPeerstoreManager(pstore peerstore.Peerstore, eventBus event.Bus, opts .. func (m *PeerstoreManager) Start() { ctx, cancel := context.WithCancel(context.Background()) m.cancel = cancel - sub, err := m.eventBus.Subscribe(&event.EvtPeerConnectednessChanged{}) + sub, err := m.eventBus.Subscribe(&event.EvtPeerConnectednessChanged{}, eventbus.Name("pstoremanager")) if err != nil { log.Warnf("subscription failed. 
Peerstore manager not activated. Error: %s", err) return @@ -108,7 +109,8 @@ func (m *PeerstoreManager) background(ctx context.Context, sub event.Subscriptio // If we reconnect to the peer before we've cleared the information, keep it. delete(disconnected, p) } - case now := <-ticker.C: + case <-ticker.C: + now := time.Now() for p, disconnectTime := range disconnected { if disconnectTime.Add(m.gracePeriod).Before(now) { m.pstore.RemovePeer(p) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go index a36e20e0..f9bbc758 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/relaysvc/relay.go @@ -4,11 +4,11 @@ import ( "context" "sync" - relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" - "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" + relayv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay" ) type RelayManager struct { @@ -44,7 +44,7 @@ func (m *RelayManager) background(ctx context.Context) { m.mutex.Unlock() }() - subReachability, _ := m.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged)) + subReachability, _ := m.host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("relaysvc")) defer subReachability.Close() for { @@ -65,14 +65,19 @@ func (m *RelayManager) background(ctx context.Context) { func (m *RelayManager) reachabilityChanged(r network.Reachability) error { switch r { case network.ReachabilityPublic: + m.mutex.Lock() + defer m.mutex.Unlock() + // This could happen if two consecutive EvtLocalReachabilityChanged report the same reachability. + // This shouldn't happen, but it's safer to double-check. + if m.relay != nil { + return nil + } relay, err := relayv2.New(m.host, m.opts...) 
if err != nil { return err } - m.mutex.Lock() - defer m.mutex.Unlock() m.relay = relay - case network.ReachabilityPrivate: + default: m.mutex.Lock() defer m.mutex.Unlock() if m.relay != nil { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md index 96edd1d0..85926100 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/README.md @@ -28,14 +28,27 @@ scalingLimits := rcmgr.DefaultLimits // Add limits around included libp2p protocols libp2p.SetDefaultServiceLimits(&scalingLimits) -// Turn the scaling limits into a static set of limits using `.AutoScale`. This +// Turn the scaling limits into a concrete set of limits using `.AutoScale`. This // scales the limits proportional to your system memory. -limits := scalingLimits.AutoScale() +scaledDefaultLimits := scalingLimits.AutoScale() + +// Tweak certain settings +cfg := rcmgr.PartialLimitConfig{ + System: rcmgr.ResourceLimits{ + // Allow unlimited outbound streams + StreamsOutbound: rcmgr.Unlimited, + }, + // Everything else is default. The exact values will come from `scaledDefaultLimits` above. +} + +// Create our limits by using our cfg and replacing the default values with values from `scaledDefaultLimits` +limits := cfg.Build(scaledDefaultLimits) // The resource manager expects a limiter, se we create one from our limits. limiter := rcmgr.NewFixedLimiter(limits) -// (Optional if you want metrics) Construct the OpenCensus metrics reporter. 
+// (Optional if you want metrics) +rcmgrObs.MustRegisterWith(prometheus.DefaultRegisterer) str, err := rcmgrObs.NewStatsTraceReporter() if err != nil { panic(err) @@ -51,6 +64,54 @@ if err != nil { host, err := libp2p.New(libp2p.ResourceManager(rm)) ``` +### Saving the limits config +The easiest way to save the defined limits is to serialize the `PartialLimitConfig` +type as JSON. + +```go +noisyNeighbor, _ := peer.Decode("QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf") +cfg := rcmgr.PartialLimitConfig{ + System: &rcmgr.ResourceLimits{ + // Allow unlimited outbound streams + StreamsOutbound: rcmgr.Unlimited, + }, + Peer: map[peer.ID]rcmgr.ResourceLimits{ + noisyNeighbor: { + // No inbound connections from this peer + ConnsInbound: rcmgr.BlockAllLimit, + // But let me open connections to them + Conns: rcmgr.DefaultLimit, + ConnsOutbound: rcmgr.DefaultLimit, + // No inbound streams from this peer + StreamsInbound: rcmgr.BlockAllLimit, + // And let me open unlimited (by me) outbound streams (the peer may have their own limits on me) + StreamsOutbound: rcmgr.Unlimited, + }, + }, +} +jsonBytes, _ := json.Marshal(&cfg) + +// string(jsonBytes) +// { +// "System": { +// "StreamsOutbound": "unlimited" +// }, +// "Peer": { +// "QmVvtzcZgCkMnSFf2dnrBPXrWuNFWNM9J3MpZQCvWPuVZf": { +// "StreamsInbound": "blockAll", +// "StreamsOutbound": "unlimited", +// "ConnsInbound": "blockAll" +// } +// } +// } +``` + +This will omit defaults from the JSON output. It will also serialize the +blockAll, and unlimited values explicitly. + +The `Memory` field is serialized as a string to workaround the JSON limitation +of 32 bit integers (`Memory` is an int64). + ## Basic Resources ### Memory @@ -278,7 +339,7 @@ This is done using the `ScalingLimitConfig`. For every scope, this configuration struct defines the absolutely bare minimum limits, and an (optional) increase of these limits, which will be applied on nodes that have sufficient memory. 
-A `ScalingLimitConfig` can be converted into a `LimitConfig` (which can then be +A `ScalingLimitConfig` can be converted into a `ConcreteLimitConfig` (which can then be used to initialize a fixed limiter with `NewFixedLimiter`) by calling the `Scale` method. The `Scale` method takes two parameters: the amount of memory and the number of file descriptors that an application is willing to dedicate to libp2p. @@ -346,7 +407,7 @@ go-libp2p process. For the default definitions see [`DefaultLimits` and If the defaults seem mostly okay, but you want to adjust one facet you can simply copy the default struct object and update the field you want to change. You can -apply changes to a `BaseLimit`, `BaseLimitIncrease`, and `LimitConfig` with +apply changes to a `BaseLimit`, `BaseLimitIncrease`, and `ConcreteLimitConfig` with `.Apply`. Example @@ -386,7 +447,7 @@ Example Log: The log line above is an example log line that gets emitted if you enable debug logging in the resource manager. You can do this by setting the environment -variable `GOLOG_LOG_LEVEL="rcmgr=info"`. By default only the error is +variable `GOLOG_LOG_LEVEL="rcmgr=debug"`. By default only the error is returned to the caller, and nothing is logged by the resource manager itself. The log line message (and returned error) will tell you which resource limit was @@ -427,10 +488,10 @@ your limits often. This could be a sign that you need to raise your limits (your process is more intensive than you originally thought) or that you need to fix something in your application (surely you don't need over 1000 streams?). -There are OpenCensus metrics that can be hooked up to the resource manager. See +There are Prometheus metrics that can be hooked up to the resource manager. See `obs/stats_test.go` for an example on how to enable this, and `DefaultViews` in `stats.go` for recommended views. These metrics can be hooked up to Prometheus -or any other OpenCensus supported platform. 
+or any other platform that can scrape a prometheus endpoint. There is also an included Grafana dashboard to help kickstart your observability into the resource manager. Find more information about it at diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go index 4b02672b..1e87e00a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/error.go @@ -6,13 +6,13 @@ import ( "github.com/libp2p/go-libp2p/core/network" ) -type errStreamOrConnLimitExceeded struct { +type ErrStreamOrConnLimitExceeded struct { current, attempted, limit int err error } -func (e *errStreamOrConnLimitExceeded) Error() string { return e.err.Error() } -func (e *errStreamOrConnLimitExceeded) Unwrap() error { return e.err } +func (e *ErrStreamOrConnLimitExceeded) Error() string { return e.err.Error() } +func (e *ErrStreamOrConnLimitExceeded) Unwrap() error { return e.err } // edge may be "" if this is not an edge error func logValuesStreamLimit(scope, edge string, dir network.Direction, stat network.ScopeStat, err error) []interface{} { @@ -22,7 +22,7 @@ func logValuesStreamLimit(scope, edge string, dir network.Direction, stat networ logValues = append(logValues, "edge", edge) } logValues = append(logValues, "direction", dir) - var e *errStreamOrConnLimitExceeded + var e *ErrStreamOrConnLimitExceeded if errors.As(err, &e) { logValues = append(logValues, "current", e.current, @@ -41,7 +41,7 @@ func logValuesConnLimit(scope, edge string, dir network.Direction, usefd bool, s logValues = append(logValues, "edge", edge) } logValues = append(logValues, "direction", dir, "usefd", usefd) - var e *errStreamOrConnLimitExceeded + var e *ErrStreamOrConnLimitExceeded if errors.As(err, &e) { logValues = append(logValues, "current", e.current, @@ -52,14 +52,14 @@ func logValuesConnLimit(scope, edge string, dir 
network.Direction, usefd bool, s return append(logValues, "stat", stat, "error", err) } -type errMemoryLimitExceeded struct { +type ErrMemoryLimitExceeded struct { current, attempted, limit int64 priority uint8 err error } -func (e *errMemoryLimitExceeded) Error() string { return e.err.Error() } -func (e *errMemoryLimitExceeded) Unwrap() error { return e.err } +func (e *ErrMemoryLimitExceeded) Error() string { return e.err.Error() } +func (e *ErrMemoryLimitExceeded) Unwrap() error { return e.err } // edge may be "" if this is not an edge error func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) []interface{} { @@ -68,7 +68,7 @@ func logValuesMemoryLimit(scope, edge string, stat network.ScopeStat, err error) if edge != "" { logValues = append(logValues, "edge", edge) } - var e *errMemoryLimitExceeded + var e *ErrMemoryLimitExceeded if errors.As(err, &e) { logValues = append(logValues, "current", e.current, diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go index 302678e1..03edcd79 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/extapi.go @@ -87,7 +87,7 @@ func (r *resourceManager) ListProtocols() []protocol.ID { } sort.Slice(result, func(i, j int) bool { - return strings.Compare(string(result[i]), string(result[j])) < 0 + return result[i] < result[j] }) return result diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go index 7d0823b1..ef7fcdc9 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit.go @@ -12,6 +12,7 @@ package rcmgr import ( "encoding/json" "io" + "math" "github.com/libp2p/go-libp2p/core/network" 
"github.com/libp2p/go-libp2p/core/peer" @@ -56,7 +57,7 @@ func NewDefaultLimiterFromJSON(in io.Reader) (Limiter, error) { } // NewLimiterFromJSON creates a new limiter by parsing a json configuration. -func NewLimiterFromJSON(in io.Reader, defaults LimitConfig) (Limiter, error) { +func NewLimiterFromJSON(in io.Reader, defaults ConcreteLimitConfig) (Limiter, error) { cfg, err := readLimiterConfigFromJSON(in, defaults) if err != nil { return nil, err @@ -64,37 +65,67 @@ func NewLimiterFromJSON(in io.Reader, defaults LimitConfig) (Limiter, error) { return &fixedLimiter{cfg}, nil } -func readLimiterConfigFromJSON(in io.Reader, defaults LimitConfig) (LimitConfig, error) { - var cfg LimitConfig +func readLimiterConfigFromJSON(in io.Reader, defaults ConcreteLimitConfig) (ConcreteLimitConfig, error) { + var cfg PartialLimitConfig if err := json.NewDecoder(in).Decode(&cfg); err != nil { - return LimitConfig{}, err + return ConcreteLimitConfig{}, err } - cfg.Apply(defaults) - return cfg, nil + return cfg.Build(defaults), nil } // fixedLimiter is a limiter with fixed limits. type fixedLimiter struct { - LimitConfig + ConcreteLimitConfig } var _ Limiter = (*fixedLimiter)(nil) -func NewFixedLimiter(conf LimitConfig) Limiter { +func NewFixedLimiter(conf ConcreteLimitConfig) Limiter { log.Debugw("initializing new limiter with config", "limits", conf) - return &fixedLimiter{LimitConfig: conf} + return &fixedLimiter{conf} } // BaseLimit is a mixin type for basic resource limits. 
type BaseLimit struct { - Streams int - StreamsInbound int - StreamsOutbound int - Conns int - ConnsInbound int - ConnsOutbound int - FD int - Memory int64 + Streams int `json:",omitempty"` + StreamsInbound int `json:",omitempty"` + StreamsOutbound int `json:",omitempty"` + Conns int `json:",omitempty"` + ConnsInbound int `json:",omitempty"` + ConnsOutbound int `json:",omitempty"` + FD int `json:",omitempty"` + Memory int64 `json:",omitempty"` +} + +func valueOrBlockAll(n int) LimitVal { + if n == 0 { + return BlockAllLimit + } else if n == math.MaxInt { + return Unlimited + } + return LimitVal(n) +} +func valueOrBlockAll64(n int64) LimitVal64 { + if n == 0 { + return BlockAllLimit64 + } else if n == math.MaxInt { + return Unlimited64 + } + return LimitVal64(n) +} + +// ToResourceLimits converts the BaseLimit to a ResourceLimits +func (l BaseLimit) ToResourceLimits() ResourceLimits { + return ResourceLimits{ + Streams: valueOrBlockAll(l.Streams), + StreamsInbound: valueOrBlockAll(l.StreamsInbound), + StreamsOutbound: valueOrBlockAll(l.StreamsOutbound), + Conns: valueOrBlockAll(l.Conns), + ConnsInbound: valueOrBlockAll(l.ConnsInbound), + ConnsOutbound: valueOrBlockAll(l.ConnsOutbound), + FD: valueOrBlockAll(l.FD), + Memory: valueOrBlockAll64(l.Memory), + } } // Apply overwrites all zero-valued limits with the values of l2 @@ -128,16 +159,16 @@ func (l *BaseLimit) Apply(l2 BaseLimit) { // BaseLimitIncrease is the increase per GiB of allowed memory. type BaseLimitIncrease struct { - Streams int - StreamsInbound int - StreamsOutbound int - Conns int - ConnsInbound int - ConnsOutbound int + Streams int `json:",omitempty"` + StreamsInbound int `json:",omitempty"` + StreamsOutbound int `json:",omitempty"` + Conns int `json:",omitempty"` + ConnsInbound int `json:",omitempty"` + ConnsOutbound int `json:",omitempty"` // Memory is in bytes. Values over 1>>30 (1GiB) don't make sense. 
- Memory int64 + Memory int64 `json:",omitempty"` // FDFraction is expected to be >= 0 and <= 1. - FDFraction float64 + FDFraction float64 `json:",omitempty"` } // Apply overwrites all zero-valued limits with the values of l2 @@ -169,7 +200,7 @@ func (l *BaseLimitIncrease) Apply(l2 BaseLimitIncrease) { } } -func (l *BaseLimit) GetStreamLimit(dir network.Direction) int { +func (l BaseLimit) GetStreamLimit(dir network.Direction) int { if dir == network.DirInbound { return l.StreamsInbound } else { @@ -177,11 +208,11 @@ func (l *BaseLimit) GetStreamLimit(dir network.Direction) int { } } -func (l *BaseLimit) GetStreamTotalLimit() int { +func (l BaseLimit) GetStreamTotalLimit() int { return l.Streams } -func (l *BaseLimit) GetConnLimit(dir network.Direction) int { +func (l BaseLimit) GetConnLimit(dir network.Direction) int { if dir == network.DirInbound { return l.ConnsInbound } else { @@ -189,78 +220,78 @@ func (l *BaseLimit) GetConnLimit(dir network.Direction) int { } } -func (l *BaseLimit) GetConnTotalLimit() int { +func (l BaseLimit) GetConnTotalLimit() int { return l.Conns } -func (l *BaseLimit) GetFDLimit() int { +func (l BaseLimit) GetFDLimit() int { return l.FD } -func (l *BaseLimit) GetMemoryLimit() int64 { +func (l BaseLimit) GetMemoryLimit() int64 { return l.Memory } func (l *fixedLimiter) GetSystemLimits() Limit { - return &l.System + return &l.system } func (l *fixedLimiter) GetTransientLimits() Limit { - return &l.Transient + return &l.transient } func (l *fixedLimiter) GetAllowlistedSystemLimits() Limit { - return &l.AllowlistedSystem + return &l.allowlistedSystem } func (l *fixedLimiter) GetAllowlistedTransientLimits() Limit { - return &l.AllowlistedTransient + return &l.allowlistedTransient } func (l *fixedLimiter) GetServiceLimits(svc string) Limit { - sl, ok := l.Service[svc] + sl, ok := l.service[svc] if !ok { - return &l.ServiceDefault + return &l.serviceDefault } return &sl } func (l *fixedLimiter) GetServicePeerLimits(svc string) Limit { - pl, ok 
:= l.ServicePeer[svc] + pl, ok := l.servicePeer[svc] if !ok { - return &l.ServicePeerDefault + return &l.servicePeerDefault } return &pl } func (l *fixedLimiter) GetProtocolLimits(proto protocol.ID) Limit { - pl, ok := l.Protocol[proto] + pl, ok := l.protocol[proto] if !ok { - return &l.ProtocolDefault + return &l.protocolDefault } return &pl } func (l *fixedLimiter) GetProtocolPeerLimits(proto protocol.ID) Limit { - pl, ok := l.ProtocolPeer[proto] + pl, ok := l.protocolPeer[proto] if !ok { - return &l.ProtocolPeerDefault + return &l.protocolPeerDefault } return &pl } func (l *fixedLimiter) GetPeerLimits(p peer.ID) Limit { - pl, ok := l.Peer[p] + pl, ok := l.peer[p] if !ok { - return &l.PeerDefault + return &l.peerDefault } return &pl } func (l *fixedLimiter) GetStreamLimits(_ peer.ID) Limit { - return &l.Stream + return &l.stream } func (l *fixedLimiter) GetConnLimits() Limit { - return &l.Conn + return &l.conn } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json new file mode 100644 index 00000000..b1a5e9ec --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_config_test.backwards-compat.json @@ -0,0 +1,45 @@ +{ + "System": { + "Memory": 65536, + "Conns": 16, + "ConnsInbound": 8, + "ConnsOutbound": 16, + "FD": 16 + }, + "ServiceDefault": { + "Memory": 8765 + }, + "Service": { + "A": { + "Memory": 8192 + }, + "B": {} + }, + "ServicePeerDefault": { + "Memory": 2048 + }, + "ServicePeer": { + "A": { + "Memory": 4096 + } + }, + "ProtocolDefault": { + "Memory": 2048 + }, + "ProtocolPeerDefault": { + "Memory": 1024 + }, + "Protocol": { + "/A": { + "Memory": 8192 + } + }, + "PeerDefault": { + "Memory": 4096 + }, + "Peer": { + "12D3KooWPFH2Bx2tPfw6RLxN8k2wh47GRXgkt9yrAHU37zFwHWzS": { + "Memory": 4097 + } + } +} \ No newline at end of file diff --git 
a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go index a9c73a4d..e7489c45 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/limit_defaults.go @@ -2,8 +2,11 @@ package rcmgr import ( "encoding/json" + "fmt" "math" + "strconv" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" @@ -108,53 +111,339 @@ func (cfg *ScalingLimitConfig) AddProtocolPeerLimit(proto protocol.ID, base Base } } -type LimitConfig struct { - System BaseLimit `json:",omitempty"` - Transient BaseLimit `json:",omitempty"` +type LimitVal int + +const ( + // DefaultLimit is the default value for resources. The exact value depends on the context, but will get values from `DefaultLimits`. + DefaultLimit LimitVal = 0 + // Unlimited is the value for unlimited resources. An arbitrarily high number will also work. + Unlimited LimitVal = -1 + // BlockAllLimit is the LimitVal for allowing no amount of resources. + BlockAllLimit LimitVal = -2 +) + +func (l LimitVal) MarshalJSON() ([]byte, error) { + if l == Unlimited { + return json.Marshal("unlimited") + } else if l == DefaultLimit { + return json.Marshal("default") + } else if l == BlockAllLimit { + return json.Marshal("blockAll") + } + return json.Marshal(int(l)) +} + +func (l *LimitVal) UnmarshalJSON(b []byte) error { + if string(b) == `"default"` { + *l = DefaultLimit + return nil + } else if string(b) == `"unlimited"` { + *l = Unlimited + return nil + } else if string(b) == `"blockAll"` { + *l = BlockAllLimit + return nil + } + + var val int + if err := json.Unmarshal(b, &val); err != nil { + return err + } + + if val == 0 { + // If there is an explicit 0 in the JSON we should interpret this as block all. 
+ *l = BlockAllLimit + return nil + } + + *l = LimitVal(val) + return nil +} + +func (l LimitVal) Build(defaultVal int) int { + if l == DefaultLimit { + return defaultVal + } + if l == Unlimited { + return math.MaxInt + } + if l == BlockAllLimit { + return 0 + } + return int(l) +} + +type LimitVal64 int64 + +const ( + // Default is the default value for resources. + DefaultLimit64 LimitVal64 = 0 + // Unlimited is the value for unlimited resources. + Unlimited64 LimitVal64 = -1 + // BlockAllLimit64 is the LimitVal for allowing no amount of resources. + BlockAllLimit64 LimitVal64 = -2 +) + +func (l LimitVal64) MarshalJSON() ([]byte, error) { + if l == Unlimited64 { + return json.Marshal("unlimited") + } else if l == DefaultLimit64 { + return json.Marshal("default") + } else if l == BlockAllLimit64 { + return json.Marshal("blockAll") + } + + // Convert this to a string because JSON doesn't support 64-bit integers. + return json.Marshal(strconv.FormatInt(int64(l), 10)) +} + +func (l *LimitVal64) UnmarshalJSON(b []byte) error { + if string(b) == `"default"` { + *l = DefaultLimit64 + return nil + } else if string(b) == `"unlimited"` { + *l = Unlimited64 + return nil + } else if string(b) == `"blockAll"` { + *l = BlockAllLimit64 + return nil + } + + var val string + if err := json.Unmarshal(b, &val); err != nil { + // Is this an integer? Possible because of backwards compatibility. + var val int + if err := json.Unmarshal(b, &val); err != nil { + return fmt.Errorf("failed to unmarshal limit value: %w", err) + } + + if val == 0 { + // If there is an explicit 0 in the JSON we should interpret this as block all. + *l = BlockAllLimit64 + return nil + } + + *l = LimitVal64(val) + return nil + } + + i, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + + if i == 0 { + // If there is an explicit 0 in the JSON we should interpret this as block all. 
+ *l = BlockAllLimit64 + return nil + } + + *l = LimitVal64(i) + return nil +} + +func (l LimitVal64) Build(defaultVal int64) int64 { + if l == DefaultLimit64 { + return defaultVal + } + if l == Unlimited64 { + return math.MaxInt64 + } + if l == BlockAllLimit64 { + return 0 + } + return int64(l) +} + +// ResourceLimits is the type for basic resource limits. +type ResourceLimits struct { + Streams LimitVal `json:",omitempty"` + StreamsInbound LimitVal `json:",omitempty"` + StreamsOutbound LimitVal `json:",omitempty"` + Conns LimitVal `json:",omitempty"` + ConnsInbound LimitVal `json:",omitempty"` + ConnsOutbound LimitVal `json:",omitempty"` + FD LimitVal `json:",omitempty"` + Memory LimitVal64 `json:",omitempty"` +} + +func (l *ResourceLimits) IsDefault() bool { + if l == nil { + return true + } + + if l.Streams == DefaultLimit && + l.StreamsInbound == DefaultLimit && + l.StreamsOutbound == DefaultLimit && + l.Conns == DefaultLimit && + l.ConnsInbound == DefaultLimit && + l.ConnsOutbound == DefaultLimit && + l.FD == DefaultLimit && + l.Memory == DefaultLimit64 { + return true + } + return false +} + +func (l *ResourceLimits) ToMaybeNilPtr() *ResourceLimits { + if l.IsDefault() { + return nil + } + return l +} + +// Apply overwrites all default limits with the values of l2 +func (l *ResourceLimits) Apply(l2 ResourceLimits) { + if l.Streams == DefaultLimit { + l.Streams = l2.Streams + } + if l.StreamsInbound == DefaultLimit { + l.StreamsInbound = l2.StreamsInbound + } + if l.StreamsOutbound == DefaultLimit { + l.StreamsOutbound = l2.StreamsOutbound + } + if l.Conns == DefaultLimit { + l.Conns = l2.Conns + } + if l.ConnsInbound == DefaultLimit { + l.ConnsInbound = l2.ConnsInbound + } + if l.ConnsOutbound == DefaultLimit { + l.ConnsOutbound = l2.ConnsOutbound + } + if l.FD == DefaultLimit { + l.FD = l2.FD + } + if l.Memory == DefaultLimit64 { + l.Memory = l2.Memory + } +} + +func (l *ResourceLimits) Build(defaults Limit) BaseLimit { + if l == nil { + return BaseLimit{ + 
Streams: defaults.GetStreamTotalLimit(), + StreamsInbound: defaults.GetStreamLimit(network.DirInbound), + StreamsOutbound: defaults.GetStreamLimit(network.DirOutbound), + Conns: defaults.GetConnTotalLimit(), + ConnsInbound: defaults.GetConnLimit(network.DirInbound), + ConnsOutbound: defaults.GetConnLimit(network.DirOutbound), + FD: defaults.GetFDLimit(), + Memory: defaults.GetMemoryLimit(), + } + } + + return BaseLimit{ + Streams: l.Streams.Build(defaults.GetStreamTotalLimit()), + StreamsInbound: l.StreamsInbound.Build(defaults.GetStreamLimit(network.DirInbound)), + StreamsOutbound: l.StreamsOutbound.Build(defaults.GetStreamLimit(network.DirOutbound)), + Conns: l.Conns.Build(defaults.GetConnTotalLimit()), + ConnsInbound: l.ConnsInbound.Build(defaults.GetConnLimit(network.DirInbound)), + ConnsOutbound: l.ConnsOutbound.Build(defaults.GetConnLimit(network.DirOutbound)), + FD: l.FD.Build(defaults.GetFDLimit()), + Memory: l.Memory.Build(defaults.GetMemoryLimit()), + } +} + +type PartialLimitConfig struct { + System ResourceLimits `json:",omitempty"` + Transient ResourceLimits `json:",omitempty"` // Limits that are applied to resources with an allowlisted multiaddr. // These will only be used if the normal System & Transient limits are // reached. 
- AllowlistedSystem BaseLimit `json:",omitempty"` - AllowlistedTransient BaseLimit `json:",omitempty"` + AllowlistedSystem ResourceLimits `json:",omitempty"` + AllowlistedTransient ResourceLimits `json:",omitempty"` - ServiceDefault BaseLimit `json:",omitempty"` - Service map[string]BaseLimit `json:",omitempty"` + ServiceDefault ResourceLimits `json:",omitempty"` + Service map[string]ResourceLimits `json:",omitempty"` - ServicePeerDefault BaseLimit `json:",omitempty"` - ServicePeer map[string]BaseLimit `json:",omitempty"` + ServicePeerDefault ResourceLimits `json:",omitempty"` + ServicePeer map[string]ResourceLimits `json:",omitempty"` - ProtocolDefault BaseLimit `json:",omitempty"` - Protocol map[protocol.ID]BaseLimit `json:",omitempty"` + ProtocolDefault ResourceLimits `json:",omitempty"` + Protocol map[protocol.ID]ResourceLimits `json:",omitempty"` - ProtocolPeerDefault BaseLimit `json:",omitempty"` - ProtocolPeer map[protocol.ID]BaseLimit `json:",omitempty"` + ProtocolPeerDefault ResourceLimits `json:",omitempty"` + ProtocolPeer map[protocol.ID]ResourceLimits `json:",omitempty"` - PeerDefault BaseLimit `json:",omitempty"` - Peer map[peer.ID]BaseLimit `json:",omitempty"` + PeerDefault ResourceLimits `json:",omitempty"` + Peer map[peer.ID]ResourceLimits `json:",omitempty"` - Conn BaseLimit `json:",omitempty"` - Stream BaseLimit `json:",omitempty"` + Conn ResourceLimits `json:",omitempty"` + Stream ResourceLimits `json:",omitempty"` } -func (cfg *LimitConfig) MarshalJSON() ([]byte, error) { +func (cfg *PartialLimitConfig) MarshalJSON() ([]byte, error) { // we want to marshal the encoded peer id - encodedPeerMap := make(map[string]BaseLimit, len(cfg.Peer)) + encodedPeerMap := make(map[string]ResourceLimits, len(cfg.Peer)) for p, v := range cfg.Peer { encodedPeerMap[p.String()] = v } - type Alias LimitConfig + type Alias PartialLimitConfig return json.Marshal(&struct { *Alias - Peer map[string]BaseLimit `json:",omitempty"` + // String so we can have the properly 
marshalled peer id + Peer map[string]ResourceLimits `json:",omitempty"` + + // The rest of the fields as pointers so that we omit empty values in the serialized result + System *ResourceLimits `json:",omitempty"` + Transient *ResourceLimits `json:",omitempty"` + AllowlistedSystem *ResourceLimits `json:",omitempty"` + AllowlistedTransient *ResourceLimits `json:",omitempty"` + + ServiceDefault *ResourceLimits `json:",omitempty"` + + ServicePeerDefault *ResourceLimits `json:",omitempty"` + + ProtocolDefault *ResourceLimits `json:",omitempty"` + + ProtocolPeerDefault *ResourceLimits `json:",omitempty"` + + PeerDefault *ResourceLimits `json:",omitempty"` + + Conn *ResourceLimits `json:",omitempty"` + Stream *ResourceLimits `json:",omitempty"` }{ Alias: (*Alias)(cfg), Peer: encodedPeerMap, + + System: cfg.System.ToMaybeNilPtr(), + Transient: cfg.Transient.ToMaybeNilPtr(), + AllowlistedSystem: cfg.AllowlistedSystem.ToMaybeNilPtr(), + AllowlistedTransient: cfg.AllowlistedTransient.ToMaybeNilPtr(), + ServiceDefault: cfg.ServiceDefault.ToMaybeNilPtr(), + ServicePeerDefault: cfg.ServicePeerDefault.ToMaybeNilPtr(), + ProtocolDefault: cfg.ProtocolDefault.ToMaybeNilPtr(), + ProtocolPeerDefault: cfg.ProtocolPeerDefault.ToMaybeNilPtr(), + PeerDefault: cfg.PeerDefault.ToMaybeNilPtr(), + Conn: cfg.Conn.ToMaybeNilPtr(), + Stream: cfg.Stream.ToMaybeNilPtr(), }) } -func (cfg *LimitConfig) Apply(c LimitConfig) { +func applyResourceLimitsMap[K comparable](this *map[K]ResourceLimits, other map[K]ResourceLimits, fallbackDefault ResourceLimits) { + for k, l := range *this { + r := fallbackDefault + if l2, ok := other[k]; ok { + r = l2 + } + l.Apply(r) + (*this)[k] = l + } + if *this == nil && other != nil { + *this = make(map[K]ResourceLimits) + } + for k, l := range other { + if _, ok := (*this)[k]; !ok { + (*this)[k] = l + } + } +} + +func (cfg *PartialLimitConfig) Apply(c PartialLimitConfig) { cfg.System.Apply(c.System) cfg.Transient.Apply(c.Transient) 
cfg.AllowlistedSystem.Apply(c.AllowlistedSystem) @@ -167,90 +456,123 @@ func (cfg *LimitConfig) Apply(c LimitConfig) { cfg.Conn.Apply(c.Conn) cfg.Stream.Apply(c.Stream) - // TODO: the following could be solved a lot nicer, if only we could use generics - for s, l := range cfg.Service { - r := cfg.ServiceDefault - if l2, ok := c.Service[s]; ok { - r = l2 - } - l.Apply(r) - cfg.Service[s] = l - } - if c.Service != nil && cfg.Service == nil { - cfg.Service = make(map[string]BaseLimit) - } - for s, l := range c.Service { - if _, ok := cfg.Service[s]; !ok { - cfg.Service[s] = l - } - } + applyResourceLimitsMap(&cfg.Service, c.Service, cfg.ServiceDefault) + applyResourceLimitsMap(&cfg.ServicePeer, c.ServicePeer, cfg.ServicePeerDefault) + applyResourceLimitsMap(&cfg.Protocol, c.Protocol, cfg.ProtocolDefault) + applyResourceLimitsMap(&cfg.ProtocolPeer, c.ProtocolPeer, cfg.ProtocolPeerDefault) + applyResourceLimitsMap(&cfg.Peer, c.Peer, cfg.PeerDefault) +} - for s, l := range cfg.ServicePeer { - r := cfg.ServicePeerDefault - if l2, ok := c.ServicePeer[s]; ok { - r = l2 - } - l.Apply(r) - cfg.ServicePeer[s] = l - } - if c.ServicePeer != nil && cfg.ServicePeer == nil { - cfg.ServicePeer = make(map[string]BaseLimit) - } - for s, l := range c.ServicePeer { - if _, ok := cfg.ServicePeer[s]; !ok { - cfg.ServicePeer[s] = l - } - } +func (cfg PartialLimitConfig) Build(defaults ConcreteLimitConfig) ConcreteLimitConfig { + out := defaults + + out.system = cfg.System.Build(defaults.system) + out.transient = cfg.Transient.Build(defaults.transient) + out.allowlistedSystem = cfg.AllowlistedSystem.Build(defaults.allowlistedSystem) + out.allowlistedTransient = cfg.AllowlistedTransient.Build(defaults.allowlistedTransient) + out.serviceDefault = cfg.ServiceDefault.Build(defaults.serviceDefault) + out.servicePeerDefault = cfg.ServicePeerDefault.Build(defaults.servicePeerDefault) + out.protocolDefault = cfg.ProtocolDefault.Build(defaults.protocolDefault) + out.protocolPeerDefault = 
cfg.ProtocolPeerDefault.Build(defaults.protocolPeerDefault) + out.peerDefault = cfg.PeerDefault.Build(defaults.peerDefault) + out.conn = cfg.Conn.Build(defaults.conn) + out.stream = cfg.Stream.Build(defaults.stream) + + out.service = buildMapWithDefault(cfg.Service, defaults.service, out.serviceDefault) + out.servicePeer = buildMapWithDefault(cfg.ServicePeer, defaults.servicePeer, out.servicePeerDefault) + out.protocol = buildMapWithDefault(cfg.Protocol, defaults.protocol, out.protocolDefault) + out.protocolPeer = buildMapWithDefault(cfg.ProtocolPeer, defaults.protocolPeer, out.protocolPeerDefault) + out.peer = buildMapWithDefault(cfg.Peer, defaults.peer, out.peerDefault) + + return out +} - for s, l := range cfg.Protocol { - r := cfg.ProtocolDefault - if l2, ok := c.Protocol[s]; ok { - r = l2 - } - l.Apply(r) - cfg.Protocol[s] = l - } - if c.Protocol != nil && cfg.Protocol == nil { - cfg.Protocol = make(map[protocol.ID]BaseLimit) - } - for s, l := range c.Protocol { - if _, ok := cfg.Protocol[s]; !ok { - cfg.Protocol[s] = l - } +func buildMapWithDefault[K comparable](definedLimits map[K]ResourceLimits, defaults map[K]BaseLimit, fallbackDefault BaseLimit) map[K]BaseLimit { + if definedLimits == nil && defaults == nil { + return nil } - for s, l := range cfg.ProtocolPeer { - r := cfg.ProtocolPeerDefault - if l2, ok := c.ProtocolPeer[s]; ok { - r = l2 - } - l.Apply(r) - cfg.ProtocolPeer[s] = l - } - if c.ProtocolPeer != nil && cfg.ProtocolPeer == nil { - cfg.ProtocolPeer = make(map[protocol.ID]BaseLimit) + out := make(map[K]BaseLimit) + for k, l := range defaults { + out[k] = l } - for s, l := range c.ProtocolPeer { - if _, ok := cfg.ProtocolPeer[s]; !ok { - cfg.ProtocolPeer[s] = l + + for k, l := range definedLimits { + if defaultForKey, ok := out[k]; ok { + out[k] = l.Build(defaultForKey) + } else { + out[k] = l.Build(fallbackDefault) } } - for s, l := range cfg.Peer { - r := cfg.PeerDefault - if l2, ok := c.Peer[s]; ok { - r = l2 - } - l.Apply(r) - cfg.Peer[s] = l 
+ return out +} + +// ConcreteLimitConfig is similar to PartialLimitConfig, but all values are defined. +// There is no unset "default" value. Commonly constructed by calling +// PartialLimitConfig.Build(rcmgr.DefaultLimits.AutoScale()) +type ConcreteLimitConfig struct { + system BaseLimit + transient BaseLimit + + // Limits that are applied to resources with an allowlisted multiaddr. + // These will only be used if the normal System & Transient limits are + // reached. + allowlistedSystem BaseLimit + allowlistedTransient BaseLimit + + serviceDefault BaseLimit + service map[string]BaseLimit + + servicePeerDefault BaseLimit + servicePeer map[string]BaseLimit + + protocolDefault BaseLimit + protocol map[protocol.ID]BaseLimit + + protocolPeerDefault BaseLimit + protocolPeer map[protocol.ID]BaseLimit + + peerDefault BaseLimit + peer map[peer.ID]BaseLimit + + conn BaseLimit + stream BaseLimit +} + +func resourceLimitsMapFromBaseLimitMap[K comparable](baseLimitMap map[K]BaseLimit) map[K]ResourceLimits { + if baseLimitMap == nil { + return nil } - if c.Peer != nil && cfg.Peer == nil { - cfg.Peer = make(map[peer.ID]BaseLimit) + + out := make(map[K]ResourceLimits) + for k, l := range baseLimitMap { + out[k] = l.ToResourceLimits() } - for s, l := range c.Peer { - if _, ok := cfg.Peer[s]; !ok { - cfg.Peer[s] = l - } + + return out +} + +// ToPartialLimitConfig converts a ConcreteLimitConfig to a PartialLimitConfig. +// The returned PartialLimitConfig will have no default values. 
+func (cfg ConcreteLimitConfig) ToPartialLimitConfig() PartialLimitConfig { + return PartialLimitConfig{ + System: cfg.system.ToResourceLimits(), + Transient: cfg.transient.ToResourceLimits(), + AllowlistedSystem: cfg.allowlistedSystem.ToResourceLimits(), + AllowlistedTransient: cfg.allowlistedTransient.ToResourceLimits(), + ServiceDefault: cfg.serviceDefault.ToResourceLimits(), + Service: resourceLimitsMapFromBaseLimitMap(cfg.service), + ServicePeerDefault: cfg.servicePeerDefault.ToResourceLimits(), + ServicePeer: resourceLimitsMapFromBaseLimitMap(cfg.servicePeer), + ProtocolDefault: cfg.protocolDefault.ToResourceLimits(), + Protocol: resourceLimitsMapFromBaseLimitMap(cfg.protocol), + ProtocolPeerDefault: cfg.protocolPeerDefault.ToResourceLimits(), + ProtocolPeer: resourceLimitsMapFromBaseLimitMap(cfg.protocolPeer), + PeerDefault: cfg.peerDefault.ToResourceLimits(), + Peer: resourceLimitsMapFromBaseLimitMap(cfg.peer), + Conn: cfg.conn.ToResourceLimits(), + Stream: cfg.stream.ToResourceLimits(), } } @@ -258,54 +580,54 @@ func (cfg *LimitConfig) Apply(c LimitConfig) { // memory is the amount of memory that the stack is allowed to consume, // for a dedicated node it's recommended to use 1/8 of the installed system memory. // If memory is smaller than 128 MB, the base configuration will be used. 
-func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) LimitConfig { - lc := LimitConfig{ - System: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD), - Transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD), - AllowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD), - AllowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD), - ServiceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD), - ServicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD), - ProtocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD), - ProtocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD), - PeerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD), - Conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD), - Stream: scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, memory, numFD), +func (cfg *ScalingLimitConfig) Scale(memory int64, numFD int) ConcreteLimitConfig { + lc := ConcreteLimitConfig{ + system: scale(cfg.SystemBaseLimit, cfg.SystemLimitIncrease, memory, numFD), + transient: scale(cfg.TransientBaseLimit, cfg.TransientLimitIncrease, memory, numFD), + allowlistedSystem: scale(cfg.AllowlistedSystemBaseLimit, cfg.AllowlistedSystemLimitIncrease, memory, numFD), + allowlistedTransient: scale(cfg.AllowlistedTransientBaseLimit, cfg.AllowlistedTransientLimitIncrease, memory, numFD), + serviceDefault: scale(cfg.ServiceBaseLimit, cfg.ServiceLimitIncrease, memory, numFD), + servicePeerDefault: scale(cfg.ServicePeerBaseLimit, cfg.ServicePeerLimitIncrease, memory, numFD), + protocolDefault: scale(cfg.ProtocolBaseLimit, cfg.ProtocolLimitIncrease, memory, numFD), + protocolPeerDefault: scale(cfg.ProtocolPeerBaseLimit, cfg.ProtocolPeerLimitIncrease, memory, numFD), + 
peerDefault: scale(cfg.PeerBaseLimit, cfg.PeerLimitIncrease, memory, numFD), + conn: scale(cfg.ConnBaseLimit, cfg.ConnLimitIncrease, memory, numFD), + stream: scale(cfg.StreamBaseLimit, cfg.ConnLimitIncrease, memory, numFD), } if cfg.ServiceLimits != nil { - lc.Service = make(map[string]BaseLimit) + lc.service = make(map[string]BaseLimit) for svc, l := range cfg.ServiceLimits { - lc.Service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) + lc.service[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) } } if cfg.ProtocolLimits != nil { - lc.Protocol = make(map[protocol.ID]BaseLimit) + lc.protocol = make(map[protocol.ID]BaseLimit) for proto, l := range cfg.ProtocolLimits { - lc.Protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) + lc.protocol[proto] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) } } if cfg.PeerLimits != nil { - lc.Peer = make(map[peer.ID]BaseLimit) + lc.peer = make(map[peer.ID]BaseLimit) for p, l := range cfg.PeerLimits { - lc.Peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) + lc.peer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) } } if cfg.ServicePeerLimits != nil { - lc.ServicePeer = make(map[string]BaseLimit) + lc.servicePeer = make(map[string]BaseLimit) for svc, l := range cfg.ServicePeerLimits { - lc.ServicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) + lc.servicePeer[svc] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) } } if cfg.ProtocolPeerLimits != nil { - lc.ProtocolPeer = make(map[protocol.ID]BaseLimit) + lc.protocolPeer = make(map[protocol.ID]BaseLimit) for p, l := range cfg.ProtocolPeerLimits { - lc.ProtocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) + lc.protocolPeer[p] = scale(l.BaseLimit, l.BaseLimitIncrease, memory, numFD) } } return lc } -func (cfg *ScalingLimitConfig) AutoScale() LimitConfig { +func (cfg *ScalingLimitConfig) AutoScale() ConcreteLimitConfig { return cfg.Scale( 
int64(memory.TotalMemory())/8, getNumFDs()/2, @@ -540,18 +862,18 @@ var infiniteBaseLimit = BaseLimit{ Memory: math.MaxInt64, } -// InfiniteLimits are a limiter configuration that uses infinite limits, thus effectively not limiting anything. +// InfiniteLimits are a limiter configuration that uses unlimited limits, thus effectively not limiting anything. // Keep in mind that the operating system limits the number of file descriptors that an application can use. -var InfiniteLimits = LimitConfig{ - System: infiniteBaseLimit, - Transient: infiniteBaseLimit, - AllowlistedSystem: infiniteBaseLimit, - AllowlistedTransient: infiniteBaseLimit, - ServiceDefault: infiniteBaseLimit, - ServicePeerDefault: infiniteBaseLimit, - ProtocolDefault: infiniteBaseLimit, - ProtocolPeerDefault: infiniteBaseLimit, - PeerDefault: infiniteBaseLimit, - Conn: infiniteBaseLimit, - Stream: infiniteBaseLimit, +var InfiniteLimits = ConcreteLimitConfig{ + system: infiniteBaseLimit, + transient: infiniteBaseLimit, + allowlistedSystem: infiniteBaseLimit, + allowlistedTransient: infiniteBaseLimit, + serviceDefault: infiniteBaseLimit, + servicePeerDefault: infiniteBaseLimit, + protocolDefault: infiniteBaseLimit, + protocolPeerDefault: infiniteBaseLimit, + peerDefault: infiniteBaseLimit, + conn: infiniteBaseLimit, + stream: infiniteBaseLimit, } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go index 03d100a6..7f15bb76 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/rcmgr.go @@ -517,40 +517,20 @@ func peerScopeName(p peer.ID) string { return fmt.Sprintf("peer:%s", p) } -// ParsePeerScopeName returns "" if name is not a peerScopeName -func ParsePeerScopeName(name string) peer.ID { +// PeerStrInScopeName returns "" if name is not a peerScopeName. 
Returns a string to avoid allocating a peer ID object +func PeerStrInScopeName(name string) string { if !strings.HasPrefix(name, "peer:") || IsSpan(name) { return "" } - parts := strings.SplitN(name, "peer:", 2) - if len(parts) != 2 { - return "" - } - p, err := peer.Decode(parts[1]) - if err != nil { + // Index to avoid allocating a new string + peerSplitIdx := strings.Index(name, "peer:") + if peerSplitIdx == -1 { return "" } + p := (name[peerSplitIdx+len("peer:"):]) return p } -// ParseServiceScopeName returns the service name if name is a serviceScopeName. -// Otherwise returns "" -func ParseServiceScopeName(name string) string { - if strings.HasPrefix(name, "service:") && !IsSpan(name) { - if strings.Contains(name, "peer:") { - // This is a service peer scope - return "" - } - parts := strings.SplitN(name, ":", 2) - if len(parts) != 2 { - return "" - } - - return parts[1] - } - return "" -} - // ParseProtocolScopeName returns the service name if name is a serviceScopeName. // Otherwise returns "" func ParseProtocolScopeName(name string) string { @@ -559,12 +539,13 @@ func ParseProtocolScopeName(name string) string { // This is a protocol peer scope return "" } - parts := strings.SplitN(name, ":", 2) - if len(parts) != 2 { - return ("") - } - return parts[1] + // Index to avoid allocating a new string + separatorIdx := strings.Index(name, ":") + if separatorIdx == -1 { + return "" + } + return name[separatorIdx+1:] } return "" } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go index 2a83c695..60089c3a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/scope.go @@ -130,7 +130,7 @@ func (rc *resources) checkMemory(rsvp int64, prio uint8) error { } if !addOk || newmem > threshold { - return &errMemoryLimitExceeded{ + return &ErrMemoryLimitExceeded{ current: 
rc.memory, attempted: rsvp, limit: limit, @@ -171,7 +171,7 @@ func (rc *resources) addStreams(incount, outcount int) error { if incount > 0 { limit := rc.limit.GetStreamLimit(network.DirInbound) if rc.nstreamsIn+incount > limit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: rc.nstreamsIn, attempted: incount, limit: limit, @@ -182,7 +182,7 @@ func (rc *resources) addStreams(incount, outcount int) error { if outcount > 0 { limit := rc.limit.GetStreamLimit(network.DirOutbound) if rc.nstreamsOut+outcount > limit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: rc.nstreamsOut, attempted: outcount, limit: limit, @@ -192,7 +192,7 @@ func (rc *resources) addStreams(incount, outcount int) error { } if limit := rc.limit.GetStreamTotalLimit(); rc.nstreamsIn+incount+rc.nstreamsOut+outcount > limit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: rc.nstreamsIn + rc.nstreamsOut, attempted: incount + outcount, limit: limit, @@ -244,7 +244,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error { if incount > 0 { limit := rc.limit.GetConnLimit(network.DirInbound) if rc.nconnsIn+incount > limit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: rc.nconnsIn, attempted: incount, limit: limit, @@ -255,7 +255,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error { if outcount > 0 { limit := rc.limit.GetConnLimit(network.DirOutbound) if rc.nconnsOut+outcount > limit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: rc.nconnsOut, attempted: outcount, limit: limit, @@ -265,7 +265,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error { } if connLimit := rc.limit.GetConnTotalLimit(); rc.nconnsIn+incount+rc.nconnsOut+outcount > connLimit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: 
rc.nconnsIn + rc.nconnsOut, attempted: incount + outcount, limit: connLimit, @@ -275,7 +275,7 @@ func (rc *resources) addConns(incount, outcount, fdcount int) error { if fdcount > 0 { limit := rc.limit.GetFDLimit() if rc.nfd+fdcount > limit { - return &errStreamOrConnLimitExceeded{ + return &ErrStreamOrConnLimitExceeded{ current: rc.nfd, attempted: fdcount, limit: limit, diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go index 75d4f7f4..50042c1f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/resource-manager/sys_unix.go @@ -1,5 +1,4 @@ //go:build linux || darwin -// +build linux darwin package rcmgr diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go b/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go index 4188d2fc..eb8e58ee 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/host/routed/routed.go @@ -114,20 +114,21 @@ func (rh *RoutedHost) Connect(ctx context.Context, pi peer.AddrInfo) error { // try to connect again. newAddrs, err := rh.findPeerAddrs(ctx, pi.ID) if err != nil { - return fmt.Errorf("failed to find peers: %w", err) + log.Debugf("failed to find more peer addresses %s: %s", pi.ID, err) + return cerr } // Build lookup map - lookup := make(map[ma.Multiaddr]struct{}, len(addrs)) + lookup := make(map[string]struct{}, len(addrs)) for _, addr := range addrs { - lookup[addr] = struct{}{} + lookup[string(addr.Bytes())] = struct{}{} } // if there's any address that's not in the previous set // of addresses, try to connect again. If all addresses // where known previously we return the original error. 
for _, newAddr := range newAddrs { - if _, found := lookup[newAddr]; found { + if _, found := lookup[string(newAddr.Bytes())]; found { continue } @@ -188,7 +189,7 @@ func (rh *RoutedHost) SetStreamHandler(pid protocol.ID, handler network.StreamHa rh.host.SetStreamHandler(pid, handler) } -func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(string) bool, handler network.StreamHandler) { +func (rh *RoutedHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) { rh.host.SetStreamHandlerMatch(pid, m, handler) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go new file mode 100644 index 00000000..2f89b951 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/dir.go @@ -0,0 +1,14 @@ +package metricshelper + +import "github.com/libp2p/go-libp2p/core/network" + +func GetDirection(dir network.Direction) string { + switch dir { + case network.DirOutbound: + return "outbound" + case network.DirInbound: + return "inbound" + default: + return "unknown" + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go new file mode 100644 index 00000000..3290ed5a --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/pool.go @@ -0,0 +1,26 @@ +package metricshelper + +import ( + "fmt" + "sync" +) + +const capacity = 8 + +var stringPool = sync.Pool{New: func() any { + s := make([]string, 0, capacity) + return &s +}} + +func GetStringSlice() *[]string { + s := stringPool.Get().(*[]string) + *s = (*s)[:0] + return s +} + +func PutStringSlice(s *[]string) { + if c := cap(*s); c < capacity { + panic(fmt.Sprintf("expected a string slice with capacity 8 or greater, got %d", c)) + } + stringPool.Put(s) +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go 
b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go new file mode 100644 index 00000000..99027c0d --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/metricshelper/registerer.go @@ -0,0 +1,20 @@ +package metricshelper + +import ( + "errors" + + "github.com/prometheus/client_golang/prometheus" +) + +// RegisterCollectors registers the collectors with reg ignoring +// reregistration error and panics on any other error +func RegisterCollectors(reg prometheus.Registerer, collectors ...prometheus.Collector) { + for _, c := range collectors { + err := reg.Register(c) + if err != nil { + if ok := errors.As(err, &prometheus.AlreadyRegisteredError{}); !ok { + panic(err) + } + } + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go index df4ff1be..32738363 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/muxer/yamux/transport.go @@ -12,6 +12,8 @@ import ( var DefaultTransport *Transport +const ID = "/yamux/1.0.0" + func init() { config := yamux.DefaultConfig() // We've bumped this to 16MiB as this critically limits throughput. diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go index c124c6bb..b42a122f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/connmgr.go @@ -38,7 +38,7 @@ type BasicConnMgr struct { // channel-based semaphore that enforces only a single trim is in progress trimMutex sync.Mutex - connCount int32 + connCount atomic.Int32 // to be accessed atomically. This is mimicking the implementation of a sync.Once. // Take care of correct alignment when modifying this struct. 
trimCount uint64 @@ -158,7 +158,7 @@ func NewConnManager(low, hi int, opts ...Option) (*BasicConnMgr, error) { // We don't pay attention to the silence period or the grace period. // We try to not kill protected connections, but if that turns out to be necessary, not connection is safe! func (cm *BasicConnMgr) memoryEmergency() { - connCount := int(atomic.LoadInt32(&cm.connCount)) + connCount := int(cm.connCount.Load()) target := connCount - cm.cfg.lowWater if target < 0 { log.Warnw("Low on memory, but we only have a few connections", "num", connCount, "low watermark", cm.cfg.lowWater) @@ -346,7 +346,7 @@ func (cm *BasicConnMgr) background() { for { select { case <-ticker.C: - if atomic.LoadInt32(&cm.connCount) < int32(cm.cfg.highWater) { + if cm.connCount.Load() < int32(cm.cfg.highWater) { // Below high water, skip. continue } @@ -375,7 +375,7 @@ func (cm *BasicConnMgr) doTrim() { func (cm *BasicConnMgr) trim() { // do the actual trim. for _, c := range cm.getConnsToClose() { - log.Infow("closing conn", "peer", c.RemotePeer()) + log.Debugw("closing conn", "peer", c.RemotePeer()) c.Close() } } @@ -456,7 +456,7 @@ func (cm *BasicConnMgr) getConnsToClose() []network.Conn { return nil } - if int(atomic.LoadInt32(&cm.connCount)) <= cm.cfg.lowWater { + if int(cm.connCount.Load()) <= cm.cfg.lowWater { log.Info("open connection count below limit") return nil } @@ -632,7 +632,7 @@ func (cm *BasicConnMgr) GetInfo() CMInfo { LowWater: cm.cfg.lowWater, LastTrim: lastTrim, GracePeriod: cm.cfg.gracePeriod, - ConnCount: int(atomic.LoadInt32(&cm.connCount)), + ConnCount: int(cm.connCount.Load()), } } @@ -686,7 +686,7 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) { } pinfo.conns[c] = cm.clock.Now() - atomic.AddInt32(&cm.connCount, 1) + cm.connCount.Add(1) } // Disconnected is called by notifiers to inform that an existing connection has been closed or terminated. 
@@ -715,7 +715,7 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) { if len(cinf.conns) == 0 { delete(s.peers, p) } - atomic.AddInt32(&cm.connCount, -1) + cm.connCount.Add(-1) } // Listen is no-op in this implementation. diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go index c10214cb..bdac0bef 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/connmgr/decay.go @@ -38,7 +38,7 @@ type decayer struct { knownTags map[string]*decayingTag // lastTick stores the last time the decayer ticked. Guarded by atomic. - lastTick atomic.Value + lastTick atomic.Pointer[time.Time] // bumpTagCh queues bump commands to be processed by the loop. bumpTagCh chan bumpCmd @@ -89,7 +89,8 @@ func NewDecayer(cfg *DecayerCfg, mgr *BasicConnMgr) (*decayer, error) { doneCh: make(chan struct{}), } - d.lastTick.Store(d.clock.Now()) + now := d.clock.Now() + d.lastTick.Store(&now) // kick things off. go d.process() @@ -116,7 +117,7 @@ func (d *decayer) RegisterDecayingTag(name string, interval time.Duration, decay "some precision may be lost", name, interval, d.cfg.Resolution) } - lastTick := d.lastTick.Load().(time.Time) + lastTick := d.lastTick.Load() tag := &decayingTag{ trkr: d, name: name, @@ -156,14 +157,14 @@ func (d *decayer) process() { var ( bmp bumpCmd - now time.Time visit = make(map[*decayingTag]struct{}) ) for { select { - case now = <-ticker.C: - d.lastTick.Store(now) + case <-ticker.C: + now := d.clock.Now() + d.lastTick.Store(&now) d.tagsMu.Lock() for _, tag := range d.knownTags { @@ -291,8 +292,8 @@ type decayingTag struct { bumpFn connmgr.BumpFn // closed marks this tag as closed, so that if it's bumped after being - // closed, we can return an error. 0 = false; 1 = true; guarded by atomic. - closed int32 + // closed, we can return an error. 
+ closed atomic.Bool } var _ connmgr.DecayingTag = (*decayingTag)(nil) @@ -307,7 +308,7 @@ func (t *decayingTag) Interval() time.Duration { // Bump bumps a tag for this peer. func (t *decayingTag) Bump(p peer.ID, delta int) error { - if atomic.LoadInt32(&t.closed) == 1 { + if t.closed.Load() { return fmt.Errorf("decaying tag %s had been closed; no further bumps are accepted", t.name) } @@ -324,7 +325,7 @@ func (t *decayingTag) Bump(p peer.ID, delta int) error { } func (t *decayingTag) Remove(p peer.ID) error { - if atomic.LoadInt32(&t.closed) == 1 { + if t.closed.Load() { return fmt.Errorf("decaying tag %s had been closed; no further removals are accepted", t.name) } @@ -341,7 +342,7 @@ func (t *decayingTag) Remove(p peer.ID) error { } func (t *decayingTag) Close() error { - if !atomic.CompareAndSwapInt32(&t.closed, 0, 1) { + if !t.closed.CompareAndSwap(false, true) { log.Warnf("duplicate decaying tag closure: %s; skipping", t.name) return nil } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go index ff339d2f..02a7d632 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/dial_worker.go @@ -27,9 +27,9 @@ type dialResponse struct { } type pendRequest struct { - req dialRequest // the original request - err *DialError // dial error accumulator - addrs map[ma.Multiaddr]struct{} // pending addr dials + req dialRequest // the original request + err *DialError // dial error accumulator + addrs map[string]struct{} // pending address to dial. The key is a multiaddr } type addrDial struct { @@ -47,7 +47,7 @@ type dialWorker struct { reqch <-chan dialRequest reqno int requests map[int]*pendRequest - pending map[ma.Multiaddr]*addrDial + pending map[string]*addrDial // pending addresses to dial. 
The key is a multiaddr resch chan dialResult connected bool // true when a connection has been successfully established @@ -67,7 +67,7 @@ func newDialWorker(s *Swarm, p peer.ID, reqch <-chan dialRequest) *dialWorker { peer: p, reqch: reqch, requests: make(map[int]*pendRequest), - pending: make(map[ma.Multiaddr]*addrDial), + pending: make(map[string]*addrDial), resch: make(chan dialResult), } } @@ -109,10 +109,10 @@ loop: pr := &pendRequest{ req: req, err: &DialError{Peer: w.peer}, - addrs: make(map[ma.Multiaddr]struct{}), + addrs: make(map[string]struct{}), } for _, a := range addrs { - pr.addrs[a] = struct{}{} + pr.addrs[string(a.Bytes())] = struct{}{} } // check if any of the addrs has been successfully dialed and accumulate @@ -121,7 +121,7 @@ loop: var tojoin []*addrDial for _, a := range addrs { - ad, ok := w.pending[a] + ad, ok := w.pending[string(a.Bytes())] if !ok { todial = append(todial, a) continue @@ -136,7 +136,7 @@ loop: if ad.err != nil { // dial to this addr errored, accumulate the error pr.err.recordErr(a, ad.err) - delete(pr.addrs, a) + delete(pr.addrs, string(a.Bytes())) continue } @@ -167,7 +167,7 @@ loop: if len(todial) > 0 { for _, a := range todial { - w.pending[a] = &addrDial{addr: a, ctx: req.ctx, requests: []int{w.reqno}} + w.pending[string(a.Bytes())] = &addrDial{addr: a, ctx: req.ctx, requests: []int{w.reqno}} } w.nextDial = append(w.nextDial, todial...) 
@@ -180,7 +180,12 @@ loop: case <-w.triggerDial: for _, addr := range w.nextDial { // spawn the dial - ad := w.pending[addr] + ad, ok := w.pending[string(addr.Bytes())] + if !ok { + log.Warn("unexpectedly missing pending addrDial for addr") + // Assume nothing to dial here + continue + } err := w.s.dialNextAddr(ad.ctx, w.peer, addr, w.resch) if err != nil { w.dispatchError(ad, err) @@ -195,7 +200,12 @@ loop: w.connected = true } - ad := w.pending[res.Addr] + ad, ok := w.pending[string(res.Addr.Bytes())] + if !ok { + log.Warn("unexpectedly missing pending addrDial res") + // Assume nothing to do here + continue + } if res.Conn != nil { // we got a connection, add it to the swarm @@ -250,7 +260,7 @@ func (w *dialWorker) dispatchError(ad *addrDial, err error) { // accumulate the error pr.err.recordErr(ad.addr, err) - delete(pr.addrs, ad.addr) + delete(pr.addrs, string(ad.addr.Bytes())) if len(pr.addrs) == 0 { // all addrs have erred, dispatch dial error // but first do a last one check in case an acceptable connection has landed from @@ -274,7 +284,7 @@ func (w *dialWorker) dispatchError(ad *addrDial, err error) { // it is also necessary to preserve consisent behaviour with the old dialer -- TestDialBackoff // regresses without this. 
if err == ErrDialBackoff { - delete(w.pending, ad.addr) + delete(w.pending, string(ad.addr.Bytes())) } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go index 7606b80c..9302c65c 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm.go @@ -11,6 +11,7 @@ import ( "time" "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/metrics" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -71,6 +72,13 @@ func WithMetrics(reporter metrics.Reporter) Option { } } +func WithMetricsTracer(t MetricsTracer) Option { + return func(s *Swarm) error { + s.metricsTracer = t + return nil + } +} + func WithDialTimeout(t time.Duration) Option { return func(s *Swarm) error { s.dialTimeout = t @@ -104,6 +112,8 @@ type Swarm struct { // down before continuing. refs sync.WaitGroup + emitter event.Emitter + rcmgr network.ResourceManager local peer.ID @@ -139,7 +149,7 @@ type Swarm struct { maResolver *madns.Resolver // stream handlers - streamh atomic.Value + streamh atomic.Pointer[network.StreamHandler] // dialing helpers dsync *dialSync @@ -151,15 +161,21 @@ type Swarm struct { ctx context.Context // is canceled when Close is called ctxCancel context.CancelFunc - bwc metrics.Reporter + bwc metrics.Reporter + metricsTracer MetricsTracer } // NewSwarm constructs a Swarm. 
-func NewSwarm(local peer.ID, peers peerstore.Peerstore, opts ...Option) (*Swarm, error) { +func NewSwarm(local peer.ID, peers peerstore.Peerstore, eventBus event.Bus, opts ...Option) (*Swarm, error) { + emitter, err := eventBus.Emitter(new(event.EvtPeerConnectednessChanged)) + if err != nil { + return nil, err + } ctx, cancel := context.WithCancel(context.Background()) s := &Swarm{ local: local, peers: peers, + emitter: emitter, ctx: ctx, ctxCancel: cancel, dialTimeout: defaultDialTimeout, @@ -195,6 +211,8 @@ func (s *Swarm) Close() error { func (s *Swarm) close() { s.ctxCancel() + s.emitter.Close() + // Prevents new connections and/or listeners from being added to the swarm. s.listeners.Lock() listeners := s.listeners.m @@ -311,6 +329,7 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, } c.streams.m = make(map[*Stream]struct{}) + isFirstConnection := len(s.conns.m[p]) == 0 s.conns.m[p] = append(s.conns.m[p], c) // Add two swarm refs: @@ -323,6 +342,15 @@ func (s *Swarm) addConn(tc transport.CapableConn, dir network.Direction) (*Conn, c.notifyLk.Lock() s.conns.Unlock() + // Emit event after releasing `s.conns` lock so that a consumer can still + // use swarm methods that need the `s.conns` lock. + if isFirstConnection { + s.emitter.Emit(event.EvtPeerConnectednessChanged{ + Peer: p, + Connectedness: network.Connected, + }) + } + s.notifyAll(func(f network.Notifiee) { f.Connected(s, c) }) @@ -339,13 +367,16 @@ func (s *Swarm) Peerstore() peerstore.Peerstore { // SetStreamHandler assigns the handler for new streams. func (s *Swarm) SetStreamHandler(handler network.StreamHandler) { - s.streamh.Store(handler) + s.streamh.Store(&handler) } // StreamHandler gets the handler for new streams. 
func (s *Swarm) StreamHandler() network.StreamHandler { - handler, _ := s.streamh.Load().(network.StreamHandler) - return handler + handler := s.streamh.Load() + if handler == nil { + return nil + } + return *handler } // NewStream creates a new stream on any available connection to peer, dialing @@ -599,21 +630,33 @@ func (s *Swarm) removeConn(c *Conn) { p := c.RemotePeer() s.conns.Lock() - defer s.conns.Unlock() + cs := s.conns.m[p] + + if len(cs) == 1 { + delete(s.conns.m, p) + s.conns.Unlock() + + // Emit event after releasing `s.conns` lock so that a consumer can still + // use swarm methods that need the `s.conns` lock. + s.emitter.Emit(event.EvtPeerConnectednessChanged{ + Peer: p, + Connectedness: network.NotConnected, + }) + return + } + + defer s.conns.Unlock() + for i, ci := range cs { if ci == c { - if len(cs) == 1 { - delete(s.conns.m, p) - } else { - // NOTE: We're intentionally preserving order. - // This way, connections to a peer are always - // sorted oldest to newest. - copy(cs[i:], cs[i+1:]) - cs[len(cs)-1] = nil - s.conns.m[p] = cs[:len(cs)-1] - } - return + // NOTE: We're intentionally preserving order. + // This way, connections to a peer are always + // sorted oldest to newest. + copy(cs[i:], cs[i+1:]) + cs[len(cs)-1] = nil + s.conns.m[p] = cs[:len(cs)-1] + break } } } @@ -630,3 +673,34 @@ func (s *Swarm) ResourceManager() network.ResourceManager { // Swarm is a Network. 
var _ network.Network = (*Swarm)(nil) var _ transport.TransportNetwork = (*Swarm)(nil) + +type connWithMetrics struct { + transport.CapableConn + opened time.Time + dir network.Direction + metricsTracer MetricsTracer +} + +func wrapWithMetrics(capableConn transport.CapableConn, metricsTracer MetricsTracer, opened time.Time, dir network.Direction) connWithMetrics { + c := connWithMetrics{CapableConn: capableConn, opened: opened, dir: dir, metricsTracer: metricsTracer} + c.metricsTracer.OpenedConnection(c.dir, capableConn.RemotePublicKey(), capableConn.ConnState(), capableConn.LocalMultiaddr()) + return c +} + +func (c connWithMetrics) completedHandshake() { + c.metricsTracer.CompletedHandshake(time.Since(c.opened), c.ConnState(), c.LocalMultiaddr()) +} + +func (c connWithMetrics) Close() error { + c.metricsTracer.ClosedConnection(c.dir, time.Since(c.opened), c.ConnState(), c.LocalMultiaddr()) + return c.CapableConn.Close() +} + +func (c connWithMetrics) Stat() network.ConnStats { + if cs, ok := c.CapableConn.(network.ConnStat); ok { + return cs.Stat() + } + return network.ConnStats{} +} + +var _ network.ConnStat = connWithMetrics{} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go index 4de2727f..e770381a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_conn.go @@ -43,6 +43,10 @@ type Conn struct { var _ network.Conn = &Conn{} +func (c *Conn) IsClosed() bool { + return c.conn.IsClosed() +} + func (c *Conn) ID() string { // format: - return fmt.Sprintf("%s-%d", c.RemotePeer().Pretty()[0:10], c.id) @@ -126,6 +130,7 @@ func (c *Conn) start() { // We only get an error here when the swarm is closed or closing. 
if err != nil { + scope.Done() return } @@ -168,11 +173,6 @@ func (c *Conn) RemotePeer() peer.ID { return c.conn.RemotePeer() } -// LocalPrivateKey is the public key of the peer on this side -func (c *Conn) LocalPrivateKey() ic.PrivKey { - return c.conn.LocalPrivateKey() -} - // RemotePublicKey is the public key of the peer on the remote side func (c *Conn) RemotePublicKey() ic.PubKey { return c.conn.RemotePublicKey() @@ -203,11 +203,20 @@ func (c *Conn) NewStream(ctx context.Context) (network.Stream, error) { if err != nil { return nil, err } - ts, err := c.conn.OpenStream(ctx) + + s, err := c.openAndAddStream(ctx, scope) if err != nil { scope.Done() return nil, err } + return s, nil +} + +func (c *Conn) openAndAddStream(ctx context.Context, scope network.StreamManagementScope) (network.Stream, error) { + ts, err := c.conn.OpenStream(ctx) + if err != nil { + return nil, err + } return c.addStream(ts, network.DirOutbound, scope) } @@ -216,7 +225,6 @@ func (c *Conn) addStream(ts network.MuxedStream, dir network.Direction, scope ne // Are we still online? 
if c.streams.m == nil { c.streams.Unlock() - scope.Done() ts.Reset() return nil, ErrConnClosed } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go index 29703f77..49c0fc7f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_dial.go @@ -12,11 +12,10 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/transport" - "github.com/lucas-clemente/quic-go" - ma "github.com/multiformats/go-multiaddr" madns "github.com/multiformats/go-multiaddr-dns" manet "github.com/multiformats/go-multiaddr/net" + "github.com/quic-go/quic-go" ) // The maximum number of address resolution steps we'll perform for a single @@ -335,6 +334,7 @@ func (s *Swarm) addrsForDial(ctx context.Context, p peer.ID) ([]ma.Multiaddr, er if forceDirect, _ := network.GetForceDirectDial(ctx); forceDirect { goodAddrs = ma.FilterAddrs(goodAddrs, s.nonProxyAddr) } + goodAddrs = network.DedupAddrs(goodAddrs) if len(goodAddrs) == 0 { return nil, ErrNoGoodAddresses @@ -483,6 +483,11 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (tra if s.local == p { return nil, ErrDialToSelf } + // Check before we start work + if err := ctx.Err(); err != nil { + log.Debugf("%s swarm not dialing. Context cancelled: %v. 
%s %s", s.local, err, p, addr) + return nil, err + } log.Debugf("%s swarm dialing %s %s", s.local, p, addr) tpt := s.TransportForDialing(addr) @@ -490,11 +495,20 @@ func (s *Swarm) dialAddr(ctx context.Context, p peer.ID, addr ma.Multiaddr) (tra return nil, ErrNoTransport } + start := time.Now() connC, err := tpt.Dial(ctx, addr, p) if err != nil { + if s.metricsTracer != nil { + s.metricsTracer.FailedDialing(addr, err) + } return nil, err } canonicallog.LogPeerStatus(100, connC.RemotePeer(), connC.RemoteMultiaddr(), "connection_status", "established", "dir", "outbound") + if s.metricsTracer != nil { + connWithMetrics := wrapWithMetrics(connC, s.metricsTracer, start, network.DirOutbound) + connWithMetrics.completedHandshake() + connC = connWithMetrics + } // Trust the transport? Yeah... right. if connC.RemotePeer() != p { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go index 9c5394d4..7ae18a9c 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_listen.go @@ -130,6 +130,9 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error { return } canonicallog.LogPeerStatus(100, c.RemotePeer(), c.RemoteMultiaddr(), "connection_status", "established", "dir", "inbound") + if s.metricsTracer != nil { + c = wrapWithMetrics(c, s.metricsTracer, time.Now(), network.DirInbound) + } log.Debugf("swarm listener accepted connection: %s <-> %s", c.LocalMultiaddr(), c.RemoteMultiaddr()) s.refs.Add(1) @@ -153,7 +156,7 @@ func (s *Swarm) AddListenAddr(a ma.Multiaddr) error { func containsMultiaddr(addrs []ma.Multiaddr, addr ma.Multiaddr) bool { for _, a := range addrs { - if addr == a { + if addr.Equal(a) { return true } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go new file mode 100644 index 00000000..95e4b78b 
--- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_metrics.go @@ -0,0 +1,215 @@ +package swarm + +import ( + "context" + "errors" + "net" + "strings" + "time" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/p2p/metricshelper" + + ma "github.com/multiformats/go-multiaddr" + + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "libp2p_swarm" + +var ( + connsOpened = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connections_opened_total", + Help: "Connections Opened", + }, + []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"}, + ) + keyTypes = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "key_types_total", + Help: "key type", + }, + []string{"dir", "key_type"}, + ) + connsClosed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connections_closed_total", + Help: "Connections Closed", + }, + []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"}, + ) + dialError = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "dial_errors_total", + Help: "Dial Error", + }, + []string{"transport", "error", "ip_version"}, + ) + connDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "connection_duration_seconds", + Help: "Duration of a Connection", + Buckets: prometheus.ExponentialBuckets(1.0/16, 2, 25), // up to 24 days + }, + []string{"dir", "transport", "security", "muxer", "early_muxer", "ip_version"}, + ) + connHandshakeLatency = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "handshake_latency_seconds", + Help: "Duration of the libp2p Handshake", + Buckets: prometheus.ExponentialBuckets(0.001, 1.3, 35), + }, + []string{"transport", 
"security", "muxer", "early_muxer", "ip_version"}, + ) + collectors = []prometheus.Collector{ + connsOpened, + keyTypes, + connsClosed, + dialError, + connDuration, + connHandshakeLatency, + } +) + +type MetricsTracer interface { + OpenedConnection(network.Direction, crypto.PubKey, network.ConnectionState, ma.Multiaddr) + ClosedConnection(network.Direction, time.Duration, network.ConnectionState, ma.Multiaddr) + CompletedHandshake(time.Duration, network.ConnectionState, ma.Multiaddr) + FailedDialing(ma.Multiaddr, error) +} + +type metricsTracer struct{} + +var _ MetricsTracer = &metricsTracer{} + +type metricsTracerSetting struct { + reg prometheus.Registerer +} + +type MetricsTracerOption func(*metricsTracerSetting) + +func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption { + return func(s *metricsTracerSetting) { + if reg != nil { + s.reg = reg + } + } +} + +func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { + setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer} + for _, opt := range opts { + opt(setting) + } + metricshelper.RegisterCollectors(setting.reg, collectors...) + return &metricsTracer{} +} + +func appendConnectionState(tags []string, cs network.ConnectionState) []string { + if cs.Transport == "" { + // This shouldn't happen, unless the transport doesn't properly set the Transport field in the ConnectionState. + tags = append(tags, "unknown") + } else { + tags = append(tags, string(cs.Transport)) + } + // These might be empty, depending on the transport. + // For example, QUIC doesn't set security nor muxer. 
+ tags = append(tags, string(cs.Security)) + tags = append(tags, string(cs.StreamMultiplexer)) + + earlyMuxer := "false" + if cs.UsedEarlyMuxerNegotiation { + earlyMuxer = "true" + } + tags = append(tags, earlyMuxer) + return tags +} + +func getIPVersion(addr ma.Multiaddr) string { + version := "unknown" + ma.ForEach(addr, func(c ma.Component) bool { + if c.Protocol().Code == ma.P_IP4 { + version = "ip4" + return false + } else if c.Protocol().Code == ma.P_IP6 { + version = "ip6" + return false + } + return true + }) + return version +} + +func (m *metricsTracer) OpenedConnection(dir network.Direction, p crypto.PubKey, cs network.ConnectionState, laddr ma.Multiaddr) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, metricshelper.GetDirection(dir)) + *tags = appendConnectionState(*tags, cs) + *tags = append(*tags, getIPVersion(laddr)) + connsOpened.WithLabelValues(*tags...).Inc() + + *tags = (*tags)[:0] + *tags = append(*tags, metricshelper.GetDirection(dir)) + *tags = append(*tags, p.Type().String()) + keyTypes.WithLabelValues(*tags...).Inc() +} + +func (m *metricsTracer) ClosedConnection(dir network.Direction, duration time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, metricshelper.GetDirection(dir)) + *tags = appendConnectionState(*tags, cs) + *tags = append(*tags, getIPVersion(laddr)) + connsClosed.WithLabelValues(*tags...).Inc() + connDuration.WithLabelValues(*tags...).Observe(duration.Seconds()) +} + +func (m *metricsTracer) CompletedHandshake(t time.Duration, cs network.ConnectionState, laddr ma.Multiaddr) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = appendConnectionState(*tags, cs) + *tags = append(*tags, getIPVersion(laddr)) + connHandshakeLatency.WithLabelValues(*tags...).Observe(t.Seconds()) +} + +var transports = 
[...]int{ma.P_CIRCUIT, ma.P_WEBRTC, ma.P_WEBTRANSPORT, ma.P_QUIC, ma.P_QUIC_V1, ma.P_WSS, ma.P_WS, ma.P_TCP} + +func (m *metricsTracer) FailedDialing(addr ma.Multiaddr, err error) { + var transport string + for _, t := range transports { + if _, err := addr.ValueForProtocol(t); err == nil { + transport = ma.ProtocolWithCode(t).Name + } + } + e := "other" + if errors.Is(err, context.Canceled) { + e = "canceled" + } else if errors.Is(err, context.DeadlineExceeded) { + e = "deadline" + } else { + nerr, ok := err.(net.Error) + if ok && nerr.Timeout() { + e = "timeout" + } else if strings.Contains(err.Error(), "connect: connection refused") { + e = "connection refused" + } + } + + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, transport, e) + *tags = append(*tags, getIPVersion(addr)) + dialError.WithLabelValues(*tags...).Inc() +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go index 7a5bb275..d372bcd8 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/swarm/swarm_stream.go @@ -24,7 +24,7 @@ type Stream struct { closeOnce sync.Once - protocol atomic.Value + protocol atomic.Pointer[protocol.ID] stat network.Stats } @@ -108,9 +108,11 @@ func (s *Stream) remove() { // Protocol returns the protocol negotiated on this stream (if set). func (s *Stream) Protocol() protocol.ID { - // Ignore type error. It means that the protocol is unset. - p, _ := s.protocol.Load().(protocol.ID) - return p + p := s.protocol.Load() + if p == nil { + return "" + } + return *p } // SetProtocol sets the protocol for this stream. 
@@ -123,7 +125,7 @@ func (s *Stream) SetProtocol(p protocol.ID) error { return err } - s.protocol.Store(p) + s.protocol.Store(&p) return nil } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go index 7a079b29..1c23a01a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/conn.go @@ -16,8 +16,9 @@ type transportConn struct { scope network.ConnManagementScope stat network.ConnStats - muxer protocol.ID - security protocol.ID + muxer protocol.ID + security protocol.ID + usedEarlyMuxerNegotiation bool } var _ transport.CapableConn = &transportConn{} @@ -56,8 +57,9 @@ func (t *transportConn) Close() error { func (t *transportConn) ConnState() network.ConnectionState { return network.ConnectionState{ - StreamMultiplexer: string(t.muxer), - Security: string(t.security), - Transport: "tcp", + StreamMultiplexer: t.muxer, + Security: t.security, + Transport: "tcp", + UsedEarlyMuxerNegotiation: t.usedEarlyMuxerNegotiation, } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go index 5a69efb0..d18c16ea 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/net/upgrader/upgrader.go @@ -53,13 +53,13 @@ type upgrader struct { connGater connmgr.ConnectionGater rcmgr network.ResourceManager - muxerMuxer *mss.MultistreamMuxer + muxerMuxer *mss.MultistreamMuxer[protocol.ID] muxers []StreamMuxer - muxerIDs []string + muxerIDs []protocol.ID security []sec.SecureTransport - securityMuxer *mss.MultistreamMuxer - securityIDs []string + securityMuxer *mss.MultistreamMuxer[protocol.ID] + securityIDs []protocol.ID // AcceptTimeout is the maximum duration an Accept is allowed to take. 
// This includes the time between accepting the raw network connection, @@ -77,10 +77,10 @@ func New(security []sec.SecureTransport, muxers []StreamMuxer, psk ipnet.PSK, rc rcmgr: rcmgr, connGater: connGater, psk: psk, - muxerMuxer: mss.NewMultistreamMuxer(), + muxerMuxer: mss.NewMultistreamMuxer[protocol.ID](), muxers: muxers, security: security, - securityMuxer: mss.NewMultistreamMuxer(), + securityMuxer: mss.NewMultistreamMuxer[protocol.ID](), } for _, opt := range opts { if err := opt(u); err != nil { @@ -90,15 +90,15 @@ func New(security []sec.SecureTransport, muxers []StreamMuxer, psk ipnet.PSK, rc if u.rcmgr == nil { u.rcmgr = &network.NullResourceManager{} } - u.muxerIDs = make([]string, 0, len(muxers)) + u.muxerIDs = make([]protocol.ID, 0, len(muxers)) for _, m := range muxers { - u.muxerMuxer.AddHandler(string(m.ID), nil) - u.muxerIDs = append(u.muxerIDs, string(m.ID)) + u.muxerMuxer.AddHandler(m.ID, nil) + u.muxerIDs = append(u.muxerIDs, m.ID) } - u.securityIDs = make([]string, 0, len(security)) + u.securityIDs = make([]protocol.ID, 0, len(security)) for _, s := range security { - u.securityMuxer.AddHandler(string(s.ID()), nil) - u.securityIDs = append(u.securityIDs, string(s.ID())) + u.securityMuxer.AddHandler(s.ID(), nil) + u.securityIDs = append(u.securityIDs, s.ID()) } return u, nil } @@ -144,7 +144,7 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma pconn, err := pnet.NewProtectedConn(u.psk, conn) if err != nil { conn.Close() - return nil, fmt.Errorf("failed to setup private network protector: %s", err) + return nil, fmt.Errorf("failed to setup private network protector: %w", err) } conn = pconn } else if ipnet.ForcePrivateNetwork { @@ -155,7 +155,7 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma sconn, security, server, err := u.setupSecurity(ctx, conn, p, dir) if err != nil { conn.Close() - return nil, fmt.Errorf("failed to negotiate security protocol: %s", err) + return nil, 
fmt.Errorf("failed to negotiate security protocol: %w", err) } // call the connection gater, if one is registered. @@ -182,18 +182,19 @@ func (u *upgrader) upgrade(ctx context.Context, t transport.Transport, maconn ma muxer, smconn, err := u.setupMuxer(ctx, sconn, server, connScope.PeerScope()) if err != nil { sconn.Close() - return nil, fmt.Errorf("failed to negotiate stream multiplexer: %s", err) + return nil, fmt.Errorf("failed to negotiate stream multiplexer: %w", err) } tc := &transportConn{ - MuxedConn: smconn, - ConnMultiaddrs: maconn, - ConnSecurity: sconn, - transport: t, - stat: stat, - scope: connScope, - muxer: muxer, - security: security, + MuxedConn: smconn, + ConnMultiaddrs: maconn, + ConnSecurity: sconn, + transport: t, + stat: stat, + scope: connScope, + muxer: muxer, + security: security, + usedEarlyMuxerNegotiation: sconn.ConnState().UsedEarlyMuxerNegotiation, } return tc, nil } @@ -219,7 +220,7 @@ func (u *upgrader) negotiateMuxer(nc net.Conn, isServer bool) (*StreamMuxer, err return nil, err } - var proto string + var proto protocol.ID if isServer { selected, _, err := u.muxerMuxer.Negotiate(nc) if err != nil { @@ -244,9 +245,9 @@ func (u *upgrader) negotiateMuxer(nc net.Conn, isServer bool) (*StreamMuxer, err return nil, fmt.Errorf("selected protocol we don't have a transport for") } -func (u *upgrader) getMuxerByID(id string) *StreamMuxer { +func (u *upgrader) getMuxerByID(id protocol.ID) *StreamMuxer { for _, m := range u.muxers { - if string(m.ID) == id { + if m.ID == id { return &m } } @@ -265,7 +266,7 @@ func (u *upgrader) setupMuxer(ctx context.Context, conn sec.SecureConn, server b if err != nil { return "", nil, err } - return protocol.ID(muxerSelected), c, nil + return muxerSelected, c, nil } type result struct { @@ -298,9 +299,9 @@ func (u *upgrader) setupMuxer(ctx context.Context, conn sec.SecureConn, server b } } -func (u *upgrader) getSecurityByID(id string) sec.SecureTransport { +func (u *upgrader) getSecurityByID(id protocol.ID) 
sec.SecureTransport { for _, s := range u.security { - if string(s.ID()) == id { + if s.ID() == id { return s } } @@ -309,7 +310,7 @@ func (u *upgrader) getSecurityByID(id string) sec.SecureTransport { func (u *upgrader) negotiateSecurity(ctx context.Context, insecure net.Conn, server bool) (sec.SecureTransport, bool, error) { type result struct { - proto string + proto protocol.ID iamserver bool err error } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile deleted file mode 100644 index fd110685..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --gogofast_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go deleted file mode 100644 index 1715f419..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.pb.go +++ /dev/null @@ -1,868 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: circuitv1.proto - -package circuitv1_pb - -import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type CircuitRelay_Status int32 - -const ( - CircuitRelay_SUCCESS CircuitRelay_Status = 100 - CircuitRelay_HOP_SRC_ADDR_TOO_LONG CircuitRelay_Status = 220 - CircuitRelay_HOP_DST_ADDR_TOO_LONG CircuitRelay_Status = 221 - CircuitRelay_HOP_SRC_MULTIADDR_INVALID CircuitRelay_Status = 250 - CircuitRelay_HOP_DST_MULTIADDR_INVALID CircuitRelay_Status = 251 - CircuitRelay_HOP_NO_CONN_TO_DST CircuitRelay_Status = 260 - CircuitRelay_HOP_CANT_DIAL_DST CircuitRelay_Status = 261 - CircuitRelay_HOP_CANT_OPEN_DST_STREAM CircuitRelay_Status = 262 - CircuitRelay_HOP_CANT_SPEAK_RELAY CircuitRelay_Status = 270 - CircuitRelay_HOP_CANT_RELAY_TO_SELF CircuitRelay_Status = 280 - CircuitRelay_STOP_SRC_ADDR_TOO_LONG CircuitRelay_Status = 320 - CircuitRelay_STOP_DST_ADDR_TOO_LONG CircuitRelay_Status = 321 - CircuitRelay_STOP_SRC_MULTIADDR_INVALID CircuitRelay_Status = 350 - CircuitRelay_STOP_DST_MULTIADDR_INVALID CircuitRelay_Status = 351 - CircuitRelay_STOP_RELAY_REFUSED CircuitRelay_Status = 390 - CircuitRelay_MALFORMED_MESSAGE CircuitRelay_Status = 400 -) - -var CircuitRelay_Status_name = map[int32]string{ - 100: "SUCCESS", - 220: "HOP_SRC_ADDR_TOO_LONG", - 221: "HOP_DST_ADDR_TOO_LONG", - 250: "HOP_SRC_MULTIADDR_INVALID", - 251: "HOP_DST_MULTIADDR_INVALID", - 260: "HOP_NO_CONN_TO_DST", - 261: "HOP_CANT_DIAL_DST", - 262: "HOP_CANT_OPEN_DST_STREAM", - 270: "HOP_CANT_SPEAK_RELAY", - 280: "HOP_CANT_RELAY_TO_SELF", - 320: "STOP_SRC_ADDR_TOO_LONG", - 321: "STOP_DST_ADDR_TOO_LONG", - 350: "STOP_SRC_MULTIADDR_INVALID", - 351: "STOP_DST_MULTIADDR_INVALID", - 390: "STOP_RELAY_REFUSED", - 400: "MALFORMED_MESSAGE", -} - -var CircuitRelay_Status_value = map[string]int32{ - "SUCCESS": 100, - "HOP_SRC_ADDR_TOO_LONG": 220, - "HOP_DST_ADDR_TOO_LONG": 221, - "HOP_SRC_MULTIADDR_INVALID": 250, - "HOP_DST_MULTIADDR_INVALID": 251, - "HOP_NO_CONN_TO_DST": 260, - "HOP_CANT_DIAL_DST": 261, - "HOP_CANT_OPEN_DST_STREAM": 262, - 
"HOP_CANT_SPEAK_RELAY": 270, - "HOP_CANT_RELAY_TO_SELF": 280, - "STOP_SRC_ADDR_TOO_LONG": 320, - "STOP_DST_ADDR_TOO_LONG": 321, - "STOP_SRC_MULTIADDR_INVALID": 350, - "STOP_DST_MULTIADDR_INVALID": 351, - "STOP_RELAY_REFUSED": 390, - "MALFORMED_MESSAGE": 400, -} - -func (x CircuitRelay_Status) Enum() *CircuitRelay_Status { - p := new(CircuitRelay_Status) - *p = x - return p -} - -func (x CircuitRelay_Status) String() string { - return proto.EnumName(CircuitRelay_Status_name, int32(x)) -} - -func (x *CircuitRelay_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CircuitRelay_Status_value, data, "CircuitRelay_Status") - if err != nil { - return err - } - *x = CircuitRelay_Status(value) - return nil -} - -func (CircuitRelay_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_129c008e7addef67, []int{0, 0} -} - -type CircuitRelay_Type int32 - -const ( - CircuitRelay_HOP CircuitRelay_Type = 1 - CircuitRelay_STOP CircuitRelay_Type = 2 - CircuitRelay_STATUS CircuitRelay_Type = 3 - CircuitRelay_CAN_HOP CircuitRelay_Type = 4 -) - -var CircuitRelay_Type_name = map[int32]string{ - 1: "HOP", - 2: "STOP", - 3: "STATUS", - 4: "CAN_HOP", -} - -var CircuitRelay_Type_value = map[string]int32{ - "HOP": 1, - "STOP": 2, - "STATUS": 3, - "CAN_HOP": 4, -} - -func (x CircuitRelay_Type) Enum() *CircuitRelay_Type { - p := new(CircuitRelay_Type) - *p = x - return p -} - -func (x CircuitRelay_Type) String() string { - return proto.EnumName(CircuitRelay_Type_name, int32(x)) -} - -func (x *CircuitRelay_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CircuitRelay_Type_value, data, "CircuitRelay_Type") - if err != nil { - return err - } - *x = CircuitRelay_Type(value) - return nil -} - -func (CircuitRelay_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_129c008e7addef67, []int{0, 1} -} - -type CircuitRelay struct { - Type *CircuitRelay_Type 
`protobuf:"varint,1,opt,name=type,enum=circuitv1.pb.CircuitRelay_Type" json:"type,omitempty"` - SrcPeer *CircuitRelay_Peer `protobuf:"bytes,2,opt,name=srcPeer" json:"srcPeer,omitempty"` - DstPeer *CircuitRelay_Peer `protobuf:"bytes,3,opt,name=dstPeer" json:"dstPeer,omitempty"` - Code *CircuitRelay_Status `protobuf:"varint,4,opt,name=code,enum=circuitv1.pb.CircuitRelay_Status" json:"code,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CircuitRelay) Reset() { *m = CircuitRelay{} } -func (m *CircuitRelay) String() string { return proto.CompactTextString(m) } -func (*CircuitRelay) ProtoMessage() {} -func (*CircuitRelay) Descriptor() ([]byte, []int) { - return fileDescriptor_129c008e7addef67, []int{0} -} -func (m *CircuitRelay) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CircuitRelay) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CircuitRelay.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CircuitRelay) XXX_Merge(src proto.Message) { - xxx_messageInfo_CircuitRelay.Merge(m, src) -} -func (m *CircuitRelay) XXX_Size() int { - return m.Size() -} -func (m *CircuitRelay) XXX_DiscardUnknown() { - xxx_messageInfo_CircuitRelay.DiscardUnknown(m) -} - -var xxx_messageInfo_CircuitRelay proto.InternalMessageInfo - -func (m *CircuitRelay) GetType() CircuitRelay_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return CircuitRelay_HOP -} - -func (m *CircuitRelay) GetSrcPeer() *CircuitRelay_Peer { - if m != nil { - return m.SrcPeer - } - return nil -} - -func (m *CircuitRelay) GetDstPeer() *CircuitRelay_Peer { - if m != nil { - return m.DstPeer - } - return nil -} - -func (m *CircuitRelay) GetCode() CircuitRelay_Status { - if m != nil && m.Code != nil { - return *m.Code - } - 
return CircuitRelay_SUCCESS -} - -type CircuitRelay_Peer struct { - Id []byte `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` - Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CircuitRelay_Peer) Reset() { *m = CircuitRelay_Peer{} } -func (m *CircuitRelay_Peer) String() string { return proto.CompactTextString(m) } -func (*CircuitRelay_Peer) ProtoMessage() {} -func (*CircuitRelay_Peer) Descriptor() ([]byte, []int) { - return fileDescriptor_129c008e7addef67, []int{0, 0} -} -func (m *CircuitRelay_Peer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CircuitRelay_Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CircuitRelay_Peer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CircuitRelay_Peer) XXX_Merge(src proto.Message) { - xxx_messageInfo_CircuitRelay_Peer.Merge(m, src) -} -func (m *CircuitRelay_Peer) XXX_Size() int { - return m.Size() -} -func (m *CircuitRelay_Peer) XXX_DiscardUnknown() { - xxx_messageInfo_CircuitRelay_Peer.DiscardUnknown(m) -} - -var xxx_messageInfo_CircuitRelay_Peer proto.InternalMessageInfo - -func (m *CircuitRelay_Peer) GetId() []byte { - if m != nil { - return m.Id - } - return nil -} - -func (m *CircuitRelay_Peer) GetAddrs() [][]byte { - if m != nil { - return m.Addrs - } - return nil -} - -func init() { - proto.RegisterEnum("circuitv1.pb.CircuitRelay_Status", CircuitRelay_Status_name, CircuitRelay_Status_value) - proto.RegisterEnum("circuitv1.pb.CircuitRelay_Type", CircuitRelay_Type_name, CircuitRelay_Type_value) - proto.RegisterType((*CircuitRelay)(nil), "circuitv1.pb.CircuitRelay") - proto.RegisterType((*CircuitRelay_Peer)(nil), "circuitv1.pb.CircuitRelay.Peer") -} - -func init() 
{ proto.RegisterFile("circuitv1.proto", fileDescriptor_129c008e7addef67) } - -var fileDescriptor_129c008e7addef67 = []byte{ - // 475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4d, 0x6f, 0xd3, 0x30, - 0x18, 0x80, 0x65, 0x27, 0xb4, 0xe8, 0x5d, 0x35, 0x8c, 0x35, 0x46, 0x56, 0x44, 0x57, 0x7a, 0xea, - 0x01, 0x55, 0x62, 0x88, 0x03, 0x47, 0x93, 0xb8, 0x5b, 0x45, 0x1a, 0x57, 0xb6, 0x8b, 0xc4, 0xc9, - 0x2a, 0x4d, 0x0e, 0x95, 0x90, 0x5a, 0xa5, 0x19, 0x52, 0xef, 0xb0, 0x23, 0xe2, 0x06, 0x3f, 0x07, - 0x38, 0x71, 0xe4, 0x07, 0xf0, 0xa5, 0xfe, 0x0c, 0xb8, 0x20, 0xbb, 0x34, 0xab, 0xe8, 0x34, 0xed, - 0xd8, 0xf7, 0x79, 0x1e, 0xd7, 0x79, 0x13, 0xb8, 0x31, 0x9e, 0xe4, 0xe3, 0xd3, 0x49, 0xf1, 0xea, - 0x41, 0x67, 0x96, 0x4f, 0x8b, 0x29, 0xad, 0x6d, 0x0c, 0x5e, 0xb4, 0xde, 0x57, 0xa0, 0x16, 0xae, - 0x06, 0x32, 0x7b, 0x39, 0x5a, 0xd0, 0x87, 0xe0, 0x17, 0x8b, 0x59, 0x16, 0xa0, 0x26, 0x6a, 0xef, - 0x1e, 0x1d, 0x76, 0x36, 0xed, 0xce, 0xa6, 0xd9, 0xd1, 0x8b, 0x59, 0x26, 0x9d, 0x4c, 0x1f, 0x43, - 0x75, 0x9e, 0x8f, 0x07, 0x59, 0x96, 0x07, 0xb8, 0x89, 0xda, 0x3b, 0x97, 0x76, 0x56, 0x93, 0x6b, - 0xdf, 0xa6, 0xe9, 0xbc, 0x70, 0xa9, 0x77, 0xc5, 0xf4, 0x9f, 0x4f, 0x1f, 0x81, 0x3f, 0x9e, 0xa6, - 0x59, 0xe0, 0xbb, 0xab, 0xde, 0xbb, 0xa4, 0x53, 0xc5, 0xa8, 0x38, 0x9d, 0x4b, 0xa7, 0xd7, 0xef, - 0x83, 0xef, 0xf2, 0x5d, 0xc0, 0x93, 0x34, 0x40, 0x4d, 0xdc, 0xae, 0x49, 0x3c, 0x49, 0xe9, 0x1e, - 0x5c, 0x1b, 0xa5, 0x69, 0x3e, 0x0f, 0x70, 0xd3, 0x6b, 0xd7, 0xe4, 0xea, 0x47, 0xeb, 0xb3, 0x07, - 0x95, 0x55, 0x4e, 0x77, 0xa0, 0xaa, 0x86, 0x61, 0xc8, 0x95, 0x22, 0x29, 0xad, 0xc3, 0xad, 0x13, - 0x31, 0x30, 0x4a, 0x86, 0x86, 0x45, 0x91, 0x34, 0x5a, 0x08, 0x13, 0x8b, 0xe4, 0x98, 0x7c, 0x43, - 0x6b, 0x16, 0x29, 0xfd, 0x1f, 0xfb, 0x8e, 0x68, 0x03, 0x0e, 0xd6, 0x5d, 0x7f, 0x18, 0xeb, 0x9e, - 0x13, 0x7a, 0xc9, 0x33, 0x16, 0xf7, 0x22, 0xf2, 0xbb, 0xe4, 0xb6, 0xdd, 0xe6, 0x7f, 0x10, 0xbd, - 0x0d, 0xd4, 0xf2, 0x44, 0x98, 0x50, 0x24, 0x89, 0xd1, 0xc2, 
0xaa, 0xe4, 0x35, 0xa6, 0xfb, 0x70, - 0xd3, 0x82, 0x90, 0x25, 0xda, 0x44, 0x3d, 0x16, 0xbb, 0xf9, 0x1b, 0x4c, 0xef, 0x42, 0x50, 0xce, - 0xc5, 0x80, 0x27, 0xee, 0x68, 0xa5, 0x25, 0x67, 0x7d, 0x72, 0x86, 0xe9, 0x01, 0xec, 0x95, 0x58, - 0x0d, 0x38, 0x7b, 0x6a, 0x24, 0x8f, 0xd9, 0x73, 0xf2, 0x16, 0xd3, 0x3b, 0xb0, 0x5f, 0x22, 0x37, - 0xb4, 0xff, 0xa6, 0x78, 0xdc, 0x25, 0x1f, 0x1c, 0x54, 0xfa, 0xc2, 0x05, 0x7c, 0x3c, 0x87, 0xdb, - 0x1b, 0xf8, 0x84, 0xe9, 0x21, 0xd4, 0xcb, 0x72, 0xfb, 0x11, 0x7f, 0x9c, 0x0b, 0x17, 0xef, 0xe0, - 0x27, 0xb6, 0x3b, 0x70, 0xc2, 0xea, 0x52, 0x92, 0x77, 0x87, 0x8a, 0x47, 0xe4, 0xcc, 0xb3, 0x3b, - 0xe8, 0xb3, 0xb8, 0x2b, 0x64, 0x9f, 0x47, 0xa6, 0xcf, 0x95, 0x62, 0xc7, 0x9c, 0xbc, 0xf3, 0x5a, - 0x47, 0xe0, 0xdb, 0xaf, 0x95, 0x56, 0xc1, 0x3b, 0x11, 0x03, 0x82, 0xe8, 0x75, 0xf0, 0xed, 0x09, - 0x04, 0x53, 0x80, 0x8a, 0xd2, 0x4c, 0x0f, 0x15, 0xf1, 0xec, 0x0b, 0x0e, 0x59, 0x62, 0xac, 0xe2, - 0x3f, 0xa9, 0x7d, 0x59, 0x36, 0xd0, 0xd7, 0x65, 0x03, 0xfd, 0x5a, 0x36, 0xd0, 0xdf, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x52, 0xcc, 0x98, 0x82, 0x47, 0x03, 0x00, 0x00, -} - -func (m *CircuitRelay) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CircuitRelay) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CircuitRelay) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Code != nil { - i = encodeVarintCircuitv1(dAtA, i, uint64(*m.Code)) - i-- - dAtA[i] = 0x20 - } - if m.DstPeer != nil { - { - size, err := m.DstPeer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuitv1(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.SrcPeer 
!= nil { - { - size, err := m.SrcPeer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuitv1(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Type != nil { - i = encodeVarintCircuitv1(dAtA, i, uint64(*m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CircuitRelay_Peer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CircuitRelay_Peer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CircuitRelay_Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarintCircuitv1(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Id == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") - } else { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintCircuitv1(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintCircuitv1(dAtA []byte, offset int, v uint64) int { - offset -= sovCircuitv1(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CircuitRelay) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != nil { - n += 1 + sovCircuitv1(uint64(*m.Type)) - } - if m.SrcPeer != nil { - l = m.SrcPeer.Size() - n += 1 + l + sovCircuitv1(uint64(l)) - } - if m.DstPeer != nil { - l = m.DstPeer.Size() - n += 1 + l + sovCircuitv1(uint64(l)) - } - 
if m.Code != nil { - n += 1 + sovCircuitv1(uint64(*m.Code)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CircuitRelay_Peer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != nil { - l = len(m.Id) - n += 1 + l + sovCircuitv1(uint64(l)) - } - if len(m.Addrs) > 0 { - for _, b := range m.Addrs { - l = len(b) - n += 1 + l + sovCircuitv1(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovCircuitv1(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCircuitv1(x uint64) (n int) { - return sovCircuitv1(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CircuitRelay) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CircuitRelay: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CircuitRelay: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var v CircuitRelay_Type - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= CircuitRelay_Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Type = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SrcPeer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuitv1 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuitv1 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SrcPeer == nil { - m.SrcPeer = &CircuitRelay_Peer{} - } - if err := m.SrcPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DstPeer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuitv1 - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuitv1 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DstPeer == nil { - m.DstPeer = &CircuitRelay_Peer{} - } - if err := m.DstPeer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - var v CircuitRelay_Status - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= CircuitRelay_Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Code = &v - default: - iNdEx = preIndex - skippy, err := skipCircuitv1(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCircuitv1 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCircuitv1 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CircuitRelay_Peer) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Peer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCircuitv1 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCircuitv1 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
- if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCircuitv1 - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCircuitv1 - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) - copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCircuitv1(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthCircuitv1 - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthCircuitv1 - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCircuitv1(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCircuitv1 - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCircuitv1 - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCircuitv1 - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCircuitv1 - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCircuitv1 = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCircuitv1 = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCircuitv1 = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto 
b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto deleted file mode 100644 index c591f075..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb/circuitv1.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; - -package circuitv1.pb; - -message CircuitRelay { - - enum Status { - SUCCESS = 100; - HOP_SRC_ADDR_TOO_LONG = 220; - HOP_DST_ADDR_TOO_LONG = 221; - HOP_SRC_MULTIADDR_INVALID = 250; - HOP_DST_MULTIADDR_INVALID = 251; - HOP_NO_CONN_TO_DST = 260; - HOP_CANT_DIAL_DST = 261; - HOP_CANT_OPEN_DST_STREAM = 262; - HOP_CANT_SPEAK_RELAY = 270; - HOP_CANT_RELAY_TO_SELF = 280; - STOP_SRC_ADDR_TOO_LONG = 320; - STOP_DST_ADDR_TOO_LONG = 321; - STOP_SRC_MULTIADDR_INVALID = 350; - STOP_DST_MULTIADDR_INVALID = 351; - STOP_RELAY_REFUSED = 390; - MALFORMED_MESSAGE = 400; - } - - enum Type { // RPC identifier, either HOP, STOP or STATUS - HOP = 1; - STOP = 2; - STATUS = 3; - CAN_HOP = 4; - } - - message Peer { - required bytes id = 1; // peer id - repeated bytes addrs = 2; // peer's known addresses - } - - optional Type type = 1; // Type of the message - - optional Peer srcPeer = 2; // srcPeer and dstPeer are used when Type is HOP or STOP - optional Peer dstPeer = 3; - - optional Status code = 4; // Status code, used when Type is STATUS -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go deleted file mode 100644 index bfd2ed89..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/options.go +++ /dev/null @@ -1,46 +0,0 @@ -package relay - -import ( - "github.com/libp2p/go-libp2p/core/peer" -) - -type Resources struct { - // MaxCircuits is the maximum number of active relay connections - MaxCircuits int - - // MaxCircuitsPerPeer is the maximum number of active relay connections per peer - MaxCircuitsPerPeer int - - // BufferSize is the buffer size for relaying in each direction - 
BufferSize int -} - -func DefaultResources() Resources { - return Resources{ - MaxCircuits: 1024, - MaxCircuitsPerPeer: 64, - BufferSize: 4096, - } -} - -type ACLFilter interface { - AllowHop(src, dest peer.ID) bool -} - -type Option func(r *Relay) error - -// WithResources specifies resource limits for the relay -func WithResources(rc Resources) Option { - return func(r *Relay) error { - r.rc = rc - return nil - } -} - -// WithACL specifies an ACLFilter for access control -func WithACL(acl ACLFilter) Option { - return func(r *Relay) error { - r.acl = acl - return nil - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go deleted file mode 100644 index 3b6f7adc..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay/relay.go +++ /dev/null @@ -1,452 +0,0 @@ -package relay - -import ( - "context" - "fmt" - "io" - "sync" - "sync/atomic" - "time" - - "github.com/libp2p/go-libp2p/core/host" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - pb "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb" - "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util" - - logging "github.com/ipfs/go-log/v2" - pool "github.com/libp2p/go-buffer-pool" - ma "github.com/multiformats/go-multiaddr" -) - -var log = logging.Logger("relay") - -const ( - ProtoID = "/libp2p/circuit/relay/0.1.0" - - ServiceName = "libp2p.relay/v1" - - StreamTimeout = time.Minute - ConnectTimeout = 30 * time.Second - HandshakeTimeout = time.Minute - - relayHopTag = "relay-v1-hop" - relayHopTagValue = 2 - - maxMessageSize = 4096 -) - -type Relay struct { - closed int32 - ctx context.Context - cancel context.CancelFunc - - host host.Host - rc Resources - acl ACLFilter - scope network.ResourceScopeSpan - - mx sync.Mutex - conns map[peer.ID]int - active int -} - -func NewRelay(h host.Host, opts ...Option) (*Relay, error) { - r := &Relay{ - 
host: h, - rc: DefaultResources(), - conns: make(map[peer.ID]int), - } - r.ctx, r.cancel = context.WithCancel(context.Background()) - - for _, opt := range opts { - err := opt(r) - if err != nil { - return nil, fmt.Errorf("error applying relay option: %w", err) - } - } - - // get a scope for memory reservations at service level - err := h.Network().ResourceManager().ViewService(ServiceName, - func(s network.ServiceScope) error { - var err error - r.scope, err = s.BeginSpan() - return err - }) - if err != nil { - return nil, err - } - - h.SetStreamHandler(ProtoID, r.handleStream) - - return r, nil -} - -func (r *Relay) Close() error { - if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { - r.host.RemoveStreamHandler(ProtoID) - r.scope.Done() - r.cancel() - } - return nil -} - -func (r *Relay) handleStream(s network.Stream) { - log.Debugf("new relay stream from: %s", s.Conn().RemotePeer()) - - if err := s.Scope().SetService(ServiceName); err != nil { - log.Debugf("error attaching stream to relay service: %s", err) - s.Reset() - return - } - - if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil { - log.Debugf("error reserving memory for stream: %s", err) - s.Reset() - return - } - defer s.Scope().ReleaseMemory(maxMessageSize) - - rd := util.NewDelimitedReader(s, maxMessageSize) - defer rd.Close() - - s.SetReadDeadline(time.Now().Add(StreamTimeout)) - - var msg pb.CircuitRelay - - err := rd.ReadMsg(&msg) - if err != nil { - r.handleError(s, pb.CircuitRelay_MALFORMED_MESSAGE) - return - } - s.SetReadDeadline(time.Time{}) - - switch msg.GetType() { - case pb.CircuitRelay_HOP: - r.handleHopStream(s, &msg) - case pb.CircuitRelay_CAN_HOP: - r.handleCanHop(s, &msg) - case pb.CircuitRelay_STOP: - r.handleError(s, pb.CircuitRelay_STOP_RELAY_REFUSED) - default: - log.Warnf("unexpected relay handshake: %d", msg.GetType()) - r.handleError(s, pb.CircuitRelay_MALFORMED_MESSAGE) - } -} - -func (r *Relay) handleHopStream(s network.Stream, msg 
*pb.CircuitRelay) { - span, err := r.scope.BeginSpan() - if err != nil { - log.Debugf("failed to begin relay transaction: %s", err) - r.handleError(s, pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - fail := func(code pb.CircuitRelay_Status) { - span.Done() - r.handleError(s, code) - } - - // reserve buffers for the relay - if err := span.ReserveMemory(2*r.rc.BufferSize, network.ReservationPriorityHigh); err != nil { - log.Debugf("error reserving memory for relay: %s", err) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - src, err := peerToPeerInfo(msg.GetSrcPeer()) - if err != nil { - fail(pb.CircuitRelay_HOP_SRC_MULTIADDR_INVALID) - return - } - - if src.ID != s.Conn().RemotePeer() { - fail(pb.CircuitRelay_HOP_SRC_MULTIADDR_INVALID) - return - } - - dest, err := peerToPeerInfo(msg.GetDstPeer()) - if err != nil { - fail(pb.CircuitRelay_HOP_DST_MULTIADDR_INVALID) - return - } - - if dest.ID == r.host.ID() { - fail(pb.CircuitRelay_HOP_CANT_RELAY_TO_SELF) - return - } - - if r.acl != nil && !r.acl.AllowHop(src.ID, dest.ID) { - log.Debugf("refusing hop from %s to %s; ACL refused", src.ID, dest.ID) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - r.mx.Lock() - if r.active >= r.rc.MaxCircuits { - r.mx.Unlock() - log.Debugf("refusing connection from %s to %s; too many active circuits", src.ID, dest.ID) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - srcConns := r.conns[src.ID] - if srcConns >= r.rc.MaxCircuitsPerPeer { - r.mx.Unlock() - log.Debugf("refusing connection from %s to %s; too many connections from %s", src.ID, dest.ID, src) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - destConns := r.conns[dest.ID] - if destConns >= r.rc.MaxCircuitsPerPeer { - r.mx.Unlock() - log.Debugf("refusing connection from %s to %s; too many connecitons to %s", src.ID, dest.ID, dest.ID) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - r.active++ - r.addConn(src.ID) - r.addConn(src.ID) - r.mx.Unlock() - - cleanup 
:= func() { - span.Done() - r.mx.Lock() - r.active-- - r.rmConn(src.ID) - r.rmConn(dest.ID) - r.mx.Unlock() - } - - // open stream - ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout) - defer cancel() - - ctx = network.WithNoDial(ctx, "relay hop") - bs, err := r.host.NewStream(ctx, dest.ID, ProtoID) - if err != nil { - log.Debugf("error opening relay stream to %s: %s", dest.ID.Pretty(), err.Error()) - if err == network.ErrNoConn { - r.handleError(s, pb.CircuitRelay_HOP_NO_CONN_TO_DST) - } else { - r.handleError(s, pb.CircuitRelay_HOP_CANT_DIAL_DST) - } - cleanup() - return - } - - fail = func(code pb.CircuitRelay_Status) { - bs.Reset() - cleanup() - r.handleError(s, code) - } - - if err := bs.Scope().SetService(ServiceName); err != nil { - log.Debugf("error attaching stream to relay service: %s", err) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - - // stop handshake - if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil { - log.Debugf("failed to reserve memory for stream: %s", err) - fail(pb.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - } - defer bs.Scope().ReleaseMemory(maxMessageSize) - - rd := util.NewDelimitedReader(bs, maxMessageSize) - wr := util.NewDelimitedWriter(bs) - defer rd.Close() - - // set handshake deadline - bs.SetDeadline(time.Now().Add(HandshakeTimeout)) - - msg.Type = pb.CircuitRelay_STOP.Enum() - - err = wr.WriteMsg(msg) - if err != nil { - log.Debugf("error writing stop handshake: %s", err.Error()) - fail(pb.CircuitRelay_HOP_CANT_OPEN_DST_STREAM) - return - } - - msg.Reset() - - err = rd.ReadMsg(msg) - if err != nil { - log.Debugf("error reading stop response: %s", err.Error()) - fail(pb.CircuitRelay_HOP_CANT_OPEN_DST_STREAM) - return - } - - if msg.GetType() != pb.CircuitRelay_STATUS { - log.Debugf("unexpected relay stop response: not a status message (%d)", msg.GetType()) - fail(pb.CircuitRelay_HOP_CANT_OPEN_DST_STREAM) - return - } - - if msg.GetCode() != 
pb.CircuitRelay_SUCCESS { - log.Debugf("relay stop failure: %d", msg.GetCode()) - fail(msg.GetCode()) - return - } - - err = r.writeResponse(s, pb.CircuitRelay_SUCCESS) - if err != nil { - log.Debugf("error writing relay response: %s", err.Error()) - bs.Reset() - s.Reset() - cleanup() - return - } - - // relay connection - log.Infof("relaying connection between %s and %s", src.ID.Pretty(), dest.ID.Pretty()) - - // reset deadline - bs.SetDeadline(time.Time{}) - - goroutines := new(int32) - *goroutines = 2 - done := func() { - if atomic.AddInt32(goroutines, -1) == 0 { - s.Close() - bs.Close() - cleanup() - } - } - - go r.relayConn(s, bs, src.ID, dest.ID, done) - go r.relayConn(bs, s, dest.ID, src.ID, done) -} - -func (r *Relay) addConn(p peer.ID) { - conns := r.conns[p] - conns++ - r.conns[p] = conns - if conns == 1 { - r.host.ConnManager().TagPeer(p, relayHopTag, relayHopTagValue) - } -} - -func (r *Relay) rmConn(p peer.ID) { - conns := r.conns[p] - conns-- - if conns > 0 { - r.conns[p] = conns - } else { - delete(r.conns, p) - r.host.ConnManager().UntagPeer(p, relayHopTag) - } -} - -func (r *Relay) relayConn(src, dest network.Stream, srcID, destID peer.ID, done func()) { - defer done() - - buf := pool.Get(r.rc.BufferSize) - defer pool.Put(buf) - - count, err := io.CopyBuffer(dest, src, buf) - if err != nil { - log.Debugf("relay copy error: %s", err) - // Reset both. 
- src.Reset() - dest.Reset() - } else { - // propagate the close - dest.CloseWrite() - } - - log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID) -} - -func (r *Relay) handleCanHop(s network.Stream, msg *pb.CircuitRelay) { - err := r.writeResponse(s, pb.CircuitRelay_SUCCESS) - - if err != nil { - s.Reset() - log.Debugf("error writing relay response: %s", err.Error()) - } else { - s.Close() - } -} - -func (r *Relay) handleError(s network.Stream, code pb.CircuitRelay_Status) { - log.Warnf("relay error: %s", code) - err := r.writeResponse(s, code) - if err != nil { - s.Reset() - log.Debugf("error writing relay response: %s", err.Error()) - } else { - s.Close() - } -} - -// Queries a peer for support of hop relay -func CanHop(ctx context.Context, host host.Host, id peer.ID) (bool, error) { - s, err := host.NewStream(ctx, id, ProtoID) - if err != nil { - return false, err - } - defer s.Close() - - rd := util.NewDelimitedReader(s, maxMessageSize) - wr := util.NewDelimitedWriter(s) - defer rd.Close() - - var msg pb.CircuitRelay - - msg.Type = pb.CircuitRelay_CAN_HOP.Enum() - - if err := wr.WriteMsg(&msg); err != nil { - s.Reset() - return false, err - } - - msg.Reset() - - if err := rd.ReadMsg(&msg); err != nil { - s.Reset() - return false, err - } - - if msg.GetType() != pb.CircuitRelay_STATUS { - return false, fmt.Errorf("unexpected relay response; not a status message (%d)", msg.GetType()) - } - - return msg.GetCode() == pb.CircuitRelay_SUCCESS, nil -} - -func (r *Relay) writeResponse(s network.Stream, code pb.CircuitRelay_Status) error { - wr := util.NewDelimitedWriter(s) - - var msg pb.CircuitRelay - msg.Type = pb.CircuitRelay_STATUS.Enum() - msg.Code = code.Enum() - - return wr.WriteMsg(&msg) -} - -func peerToPeerInfo(p *pb.CircuitRelay_Peer) (peer.AddrInfo, error) { - if p == nil { - return peer.AddrInfo{}, fmt.Errorf("nil peer") - } - - id, err := peer.IDFromBytes(p.Id) - if err != nil { - return peer.AddrInfo{}, err - } - - addrs := 
make([]ma.Multiaddr, 0, len(p.Addrs)) - for _, addrBytes := range p.Addrs { - a, err := ma.NewMultiaddrBytes(addrBytes) - if err == nil { - addrs = append(addrs, a) - } - } - - return peer.AddrInfo{ID: id, Addrs: addrs}, nil -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go index aa302e7e..c22436bc 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/client.go @@ -66,13 +66,11 @@ func New(h host.Host, upgrader transport.Upgrader) (*Client, error) { // Start registers the circuit (client) protocol stream handlers func (c *Client) Start() { - c.host.SetStreamHandler(proto.ProtoIDv1, c.handleStreamV1) c.host.SetStreamHandler(proto.ProtoIDv2Stop, c.handleStreamV2) } func (c *Client) Close() error { c.ctxCancel() - c.host.RemoveStreamHandler(proto.ProtoIDv1) c.host.RemoveStreamHandler(proto.ProtoIDv2Stop) return nil } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go index 23bde93d..ed01be3b 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/conn.go @@ -7,6 +7,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + tpt "github.com/libp2p/go-libp2p/core/transport" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" @@ -79,7 +80,7 @@ func (c *Conn) RemoteMultiaddr() ma.Multiaddr { // TODO: We should be able to do this directly without converting to/from a string. 
relayAddr, err := ma.NewComponent( ma.ProtocolWithCode(ma.P_P2P).Name, - c.stream.Conn().RemotePeer().Pretty(), + c.stream.Conn().RemotePeer().String(), ) if err != nil { panic(err) @@ -102,8 +103,8 @@ func (c *Conn) LocalAddr() net.Addr { func (c *Conn) RemoteAddr() net.Addr { return &NetAddr{ - Relay: c.stream.Conn().RemotePeer().Pretty(), - Remote: c.remote.ID.Pretty(), + Relay: c.stream.Conn().RemotePeer().String(), + Remote: c.remote.ID.String(), } } @@ -143,3 +144,20 @@ func (c *Conn) untagHop() { delete(c.client.hopCount, p) } } + +type capableConnWithStat interface { + tpt.CapableConn + network.ConnStat +} + +type capableConn struct { + capableConnWithStat +} + +var transportName = ma.ProtocolWithCode(ma.P_CIRCUIT).Name + +func (c capableConn) ConnState() network.ConnectionState { + return network.ConnectionState{ + Transport: transportName, + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go index 2e5fc73b..ecf5d3a5 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/dial.go @@ -8,7 +8,6 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" - pbv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb" pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb" "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto" "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util" @@ -124,25 +123,14 @@ func (c *Client) dialPeer(ctx context.Context, relay, dest peer.AddrInfo) (*Conn dialCtx, cancel := context.WithTimeout(ctx, DialRelayTimeout) defer cancel() - s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop, proto.ProtoIDv1) + s, err := c.host.NewStream(dialCtx, relay.ID, proto.ProtoIDv2Hop) if err != nil { return nil, fmt.Errorf("error 
opening hop stream to relay: %w", err) } - - switch s.Protocol() { - case proto.ProtoIDv2Hop: - return c.connectV2(s, dest) - - case proto.ProtoIDv1: - return c.connectV1(s, dest) - - default: - s.Reset() - return nil, fmt.Errorf("unexpected stream protocol: %s", s.Protocol()) - } + return c.connect(s, dest) } -func (c *Client) connectV2(s network.Stream, dest peer.AddrInfo) (*Conn, error) { +func (c *Client) connect(s network.Stream, dest peer.AddrInfo) (*Conn, error) { if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil { s.Reset() return nil, err @@ -199,52 +187,3 @@ func (c *Client) connectV2(s network.Stream, dest peer.AddrInfo) (*Conn, error) return &Conn{stream: s, remote: dest, stat: stat, client: c}, nil } - -func (c *Client) connectV1(s network.Stream, dest peer.AddrInfo) (*Conn, error) { - if err := s.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil { - s.Reset() - return nil, err - } - defer s.Scope().ReleaseMemory(maxMessageSize) - - rd := util.NewDelimitedReader(s, maxMessageSize) - wr := util.NewDelimitedWriter(s) - defer rd.Close() - - var msg pbv1.CircuitRelay - - msg.Type = pbv1.CircuitRelay_HOP.Enum() - msg.SrcPeer = util.PeerInfoToPeerV1(c.host.Peerstore().PeerInfo(c.host.ID())) - msg.DstPeer = util.PeerInfoToPeerV1(dest) - - s.SetDeadline(time.Now().Add(DialTimeout)) - - err := wr.WriteMsg(&msg) - if err != nil { - s.Reset() - return nil, err - } - - msg.Reset() - - err = rd.ReadMsg(&msg) - if err != nil { - s.Reset() - return nil, err - } - - s.SetDeadline(time.Time{}) - - if msg.GetType() != pbv1.CircuitRelay_STATUS { - s.Reset() - return nil, newRelayError("unexpected relay response; not a status message (%d)", msg.GetType()) - } - - status := msg.GetCode() - if status != pbv1.CircuitRelay_SUCCESS { - s.Reset() - return nil, newRelayError("error opening relay circuit: %s (%d)", pbv1.CircuitRelay_Status_name[int32(status)], status) - } - - return &Conn{stream: s, 
remote: dest, client: c}, nil -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go index ef50b8e8..6b5361b1 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/handlers.go @@ -4,7 +4,6 @@ import ( "time" "github.com/libp2p/go-libp2p/core/network" - pbv1 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb" pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb" "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util" ) @@ -87,85 +86,3 @@ func (c *Client) handleStreamV2(s network.Stream) { handleError(pbv2.Status_CONNECTION_FAILED) } } - -func (c *Client) handleStreamV1(s network.Stream) { - log.Debugf("new relay/v1 stream from: %s", s.Conn().RemotePeer()) - - s.SetReadDeadline(time.Now().Add(StreamTimeout)) - - rd := util.NewDelimitedReader(s, maxMessageSize) - defer rd.Close() - - writeResponse := func(status pbv1.CircuitRelay_Status) error { - wr := util.NewDelimitedWriter(s) - - var msg pbv1.CircuitRelay - msg.Type = pbv1.CircuitRelay_STATUS.Enum() - msg.Code = status.Enum() - - return wr.WriteMsg(&msg) - } - - handleError := func(status pbv1.CircuitRelay_Status) { - log.Debugf("protocol error: %s (%d)", pbv1.CircuitRelay_Status_name[int32(status)], status) - err := writeResponse(status) - if err != nil { - s.Reset() - log.Debugf("error writing circuit response: %s", err.Error()) - } else { - s.Close() - } - } - - var msg pbv1.CircuitRelay - - err := rd.ReadMsg(&msg) - if err != nil { - handleError(pbv1.CircuitRelay_MALFORMED_MESSAGE) - return - } - // reset stream deadline as message has been read - s.SetReadDeadline(time.Time{}) - - switch msg.GetType() { - case pbv1.CircuitRelay_STOP: - - case pbv1.CircuitRelay_HOP: - handleError(pbv1.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - - case pbv1.CircuitRelay_CAN_HOP: - 
handleError(pbv1.CircuitRelay_HOP_CANT_SPEAK_RELAY) - return - - default: - log.Debugf("unexpected relay handshake: %d", msg.GetType()) - handleError(pbv1.CircuitRelay_MALFORMED_MESSAGE) - return - } - - src, err := util.PeerToPeerInfoV1(msg.GetSrcPeer()) - if err != nil { - handleError(pbv1.CircuitRelay_STOP_SRC_MULTIADDR_INVALID) - return - } - - dst, err := util.PeerToPeerInfoV1(msg.GetDstPeer()) - if err != nil || dst.ID != c.host.ID() { - handleError(pbv1.CircuitRelay_STOP_DST_MULTIADDR_INVALID) - return - } - - log.Debugf("incoming relay connection from: %s", src.ID) - - select { - case c.incoming <- accept{ - conn: &Conn{stream: s, remote: src, client: c}, - writeResponse: func() error { - return writeResponse(pbv1.CircuitRelay_SUCCESS) - }, - }: - case <-time.After(AcceptTimeout): - handleError(pbv1.CircuitRelay_STOP_RELAY_REFUSED) - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go index 1cd451ad..dbb92419 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/reservation.go @@ -37,6 +37,27 @@ type Reservation struct { Voucher *proto.ReservationVoucher } +// ReservationError is the error returned on failure to reserve a slot in the relay +type ReservationError struct { + + // Status is the status returned by the relay for rejecting the reservation + // request. 
It is set to pbv2.Status_CONNECTION_FAILED on other failures + Status pbv2.Status + + // Reason is the reason for reservation failure + Reason string + + err error +} + +func (re ReservationError) Error() string { + return fmt.Sprintf("reservation error: status: %s reason: %s err: %s", pbv2.Status_name[int32(re.Status)], re.Reason, re.err) +} + +func (re ReservationError) Unwrap() error { + return re.err +} + // Reserve reserves a slot in a relay and returns the reservation information. // Clients must reserve slots in order for the relay to relay connections to them. func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation, error) { @@ -46,7 +67,7 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation, s, err := h.NewStream(ctx, ai.ID, proto.ProtoIDv2Hop) if err != nil { - return nil, err + return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "failed to open stream", err: err} } defer s.Close() @@ -61,33 +82,39 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation, if err := wr.WriteMsg(&msg); err != nil { s.Reset() - return nil, fmt.Errorf("error writing reservation message: %w", err) + return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error writing reservation message", err: err} } msg.Reset() if err := rd.ReadMsg(&msg); err != nil { s.Reset() - return nil, fmt.Errorf("error reading reservation response message: %w", err) + return nil, ReservationError{Status: pbv2.Status_CONNECTION_FAILED, Reason: "error reading reservation response message: %w", err: err} } if msg.GetType() != pbv2.HopMessage_STATUS { - return nil, fmt.Errorf("unexpected relay response: not a status message (%d)", msg.GetType()) + return nil, ReservationError{ + Status: pbv2.Status_MALFORMED_MESSAGE, + Reason: fmt.Sprintf("unexpected relay response: not a status message (%d)", msg.GetType()), + err: err} } if status := msg.GetStatus(); status != pbv2.Status_OK { - 
return nil, fmt.Errorf("reservation failed: %s (%d)", pbv2.Status_name[int32(status)], status) + return nil, ReservationError{Status: msg.GetStatus(), Reason: "reservation failed"} } rsvp := msg.GetReservation() if rsvp == nil { - return nil, fmt.Errorf("missing reservation info") + return nil, ReservationError{Status: pbv2.Status_MALFORMED_MESSAGE, Reason: "missing reservation info"} } result := &Reservation{} result.Expiration = time.Unix(int64(rsvp.GetExpire()), 0) if result.Expiration.Before(time.Now()) { - return nil, fmt.Errorf("received reservation with expiration date in the past: %s", result.Expiration) + return nil, ReservationError{ + Status: pbv2.Status_MALFORMED_MESSAGE, + Reason: fmt.Sprintf("received reservation with expiration date in the past: %s", result.Expiration), + } } addrs := rsvp.GetAddrs() @@ -105,12 +132,19 @@ func Reserve(ctx context.Context, h host.Host, ai peer.AddrInfo) (*Reservation, if voucherBytes != nil { _, rec, err := record.ConsumeEnvelope(voucherBytes, proto.RecordDomain) if err != nil { - return nil, fmt.Errorf("error consuming voucher envelope: %w", err) + return nil, ReservationError{ + Status: pbv2.Status_MALFORMED_MESSAGE, + Reason: fmt.Sprintf("error consuming voucher envelope: %s", err), + err: err, + } } voucher, ok := rec.(*proto.ReservationVoucher) if !ok { - return nil, fmt.Errorf("unexpected voucher record type: %+T", rec) + return nil, ReservationError{ + Status: pbv2.Status_MALFORMED_MESSAGE, + Reason: fmt.Sprintf("unexpected voucher record type: %+T", rec), + } } result.Voucher = voucher } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go index 97fc1ce1..e08d5570 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client/transport.go @@ -53,17 +53,28 @@ func (c *Client) Dial(ctx 
context.Context, a ma.Multiaddr, p peer.ID) (transport if err != nil { return nil, err } - if err := connScope.SetPeer(p); err != nil { + conn, err := c.dialAndUpgrade(ctx, a, p, connScope) + if err != nil { connScope.Done() return nil, err } + return conn, nil +} + +func (c *Client) dialAndUpgrade(ctx context.Context, a ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) { + if err := connScope.SetPeer(p); err != nil { + return nil, err + } conn, err := c.dial(ctx, a, p) if err != nil { - connScope.Done() return nil, err } conn.tagHop() - return c.upgrader.Upgrade(ctx, c, conn, network.DirOutbound, p, connScope) + cc, err := c.upgrader.Upgrade(ctx, c, conn, network.DirOutbound, p, connScope) + if err != nil { + return nil, err + } + return capableConn{cc.(capableConnWithStat)}, nil } func (c *Client) CanDial(addr ma.Multiaddr) bool { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile deleted file mode 100644 index c360a6fb..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --gogofast_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go index 9cbff1ac..d4d285a3 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.pb.go @@ -1,31 +1,30 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: circuit.proto +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/circuit.proto -package circuit_pb +package pb import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Status int32 const ( + // zero value field required for proto3 compatibility + Status_UNUSED Status = 0 Status_OK Status = 100 Status_RESERVATION_REFUSED Status = 200 Status_RESOURCE_LIMIT_EXCEEDED Status = 201 @@ -36,27 +35,31 @@ const ( Status_UNEXPECTED_MESSAGE Status = 401 ) -var Status_name = map[int32]string{ - 100: "OK", - 200: "RESERVATION_REFUSED", - 201: "RESOURCE_LIMIT_EXCEEDED", - 202: "PERMISSION_DENIED", - 203: "CONNECTION_FAILED", - 204: "NO_RESERVATION", - 400: "MALFORMED_MESSAGE", - 401: "UNEXPECTED_MESSAGE", -} - -var Status_value = map[string]int32{ - "OK": 100, - "RESERVATION_REFUSED": 200, - "RESOURCE_LIMIT_EXCEEDED": 201, - "PERMISSION_DENIED": 202, - "CONNECTION_FAILED": 203, - "NO_RESERVATION": 204, - "MALFORMED_MESSAGE": 400, - "UNEXPECTED_MESSAGE": 401, -} +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "UNUSED", + 100: "OK", + 200: "RESERVATION_REFUSED", + 201: "RESOURCE_LIMIT_EXCEEDED", + 202: "PERMISSION_DENIED", + 203: "CONNECTION_FAILED", + 204: "NO_RESERVATION", + 400: "MALFORMED_MESSAGE", + 401: "UNEXPECTED_MESSAGE", + } + Status_value = map[string]int32{ + "UNUSED": 0, + "OK": 100, + "RESERVATION_REFUSED": 200, + "RESOURCE_LIMIT_EXCEEDED": 201, + "PERMISSION_DENIED": 202, + "CONNECTION_FAILED": 203, + "NO_RESERVATION": 204, + "MALFORMED_MESSAGE": 400, + "UNEXPECTED_MESSAGE": 401, + } +) func (x Status) Enum() *Status { p := new(Status) @@ -65,20 +68,24 @@ func (x Status) Enum() *Status { } func (x Status) String() string { - return proto.EnumName(Status_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Status_value, data, "Status") - if err != nil { - return err - } - *x = Status(value) - return nil +func (Status) Descriptor() protoreflect.EnumDescriptor { + return 
file_pb_circuit_proto_enumTypes[0].Descriptor() } +func (Status) Type() protoreflect.EnumType { + return &file_pb_circuit_proto_enumTypes[0] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. func (Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{0} + return file_pb_circuit_proto_rawDescGZIP(), []int{0} } type HopMessage_Type int32 @@ -89,17 +96,19 @@ const ( HopMessage_STATUS HopMessage_Type = 2 ) -var HopMessage_Type_name = map[int32]string{ - 0: "RESERVE", - 1: "CONNECT", - 2: "STATUS", -} - -var HopMessage_Type_value = map[string]int32{ - "RESERVE": 0, - "CONNECT": 1, - "STATUS": 2, -} +// Enum value maps for HopMessage_Type. +var ( + HopMessage_Type_name = map[int32]string{ + 0: "RESERVE", + 1: "CONNECT", + 2: "STATUS", + } + HopMessage_Type_value = map[string]int32{ + "RESERVE": 0, + "CONNECT": 1, + "STATUS": 2, + } +) func (x HopMessage_Type) Enum() *HopMessage_Type { p := new(HopMessage_Type) @@ -108,20 +117,24 @@ func (x HopMessage_Type) Enum() *HopMessage_Type { } func (x HopMessage_Type) String() string { - return proto.EnumName(HopMessage_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *HopMessage_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HopMessage_Type_value, data, "HopMessage_Type") - if err != nil { - return err - } - *x = HopMessage_Type(value) - return nil +func (HopMessage_Type) Descriptor() protoreflect.EnumDescriptor { + return file_pb_circuit_proto_enumTypes[1].Descriptor() } +func (HopMessage_Type) Type() protoreflect.EnumType { + return &file_pb_circuit_proto_enumTypes[1] +} + +func (x HopMessage_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HopMessage_Type.Descriptor instead. 
func (HopMessage_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{0, 0} + return file_pb_circuit_proto_rawDescGZIP(), []int{0, 0} } type StopMessage_Type int32 @@ -131,15 +144,17 @@ const ( StopMessage_STATUS StopMessage_Type = 1 ) -var StopMessage_Type_name = map[int32]string{ - 0: "CONNECT", - 1: "STATUS", -} - -var StopMessage_Type_value = map[string]int32{ - "CONNECT": 0, - "STATUS": 1, -} +// Enum value maps for StopMessage_Type. +var ( + StopMessage_Type_name = map[int32]string{ + 0: "CONNECT", + 1: "STATUS", + } + StopMessage_Type_value = map[string]int32{ + "CONNECT": 0, + "STATUS": 1, + } +) func (x StopMessage_Type) Enum() *StopMessage_Type { p := new(StopMessage_Type) @@ -148,1611 +163,565 @@ func (x StopMessage_Type) Enum() *StopMessage_Type { } func (x StopMessage_Type) String() string { - return proto.EnumName(StopMessage_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *StopMessage_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(StopMessage_Type_value, data, "StopMessage_Type") - if err != nil { - return err - } - *x = StopMessage_Type(value) - return nil +func (StopMessage_Type) Descriptor() protoreflect.EnumDescriptor { + return file_pb_circuit_proto_enumTypes[2].Descriptor() } -func (StopMessage_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{1, 0} +func (StopMessage_Type) Type() protoreflect.EnumType { + return &file_pb_circuit_proto_enumTypes[2] } -type HopMessage struct { - Type *HopMessage_Type `protobuf:"varint,1,req,name=type,enum=circuit.pb.HopMessage_Type" json:"type,omitempty"` - Peer *Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"` - Reservation *Reservation `protobuf:"bytes,3,opt,name=reservation" json:"reservation,omitempty"` - Limit *Limit `protobuf:"bytes,4,opt,name=limit" json:"limit,omitempty"` - Status *Status 
`protobuf:"varint,5,opt,name=status,enum=circuit.pb.Status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HopMessage) Reset() { *m = HopMessage{} } -func (m *HopMessage) String() string { return proto.CompactTextString(m) } -func (*HopMessage) ProtoMessage() {} -func (*HopMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{0} -} -func (m *HopMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HopMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HopMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HopMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_HopMessage.Merge(m, src) -} -func (m *HopMessage) XXX_Size() int { - return m.Size() -} -func (m *HopMessage) XXX_DiscardUnknown() { - xxx_messageInfo_HopMessage.DiscardUnknown(m) +func (x StopMessage_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -var xxx_messageInfo_HopMessage proto.InternalMessageInfo - -func (m *HopMessage) GetType() HopMessage_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return HopMessage_RESERVE +// Deprecated: Use StopMessage_Type.Descriptor instead. +func (StopMessage_Type) EnumDescriptor() ([]byte, []int) { + return file_pb_circuit_proto_rawDescGZIP(), []int{1, 0} } -func (m *HopMessage) GetPeer() *Peer { - if m != nil { - return m.Peer - } - return nil -} +type HopMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *HopMessage) GetReservation() *Reservation { - if m != nil { - return m.Reservation - } - return nil + // This field is marked optional for backwards compatibility with proto2. 
+ // Users should make sure to always set this. + Type *HopMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=circuit.pb.HopMessage_Type,oneof" json:"type,omitempty"` + Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"` + Reservation *Reservation `protobuf:"bytes,3,opt,name=reservation,proto3,oneof" json:"reservation,omitempty"` + Limit *Limit `protobuf:"bytes,4,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + Status *Status `protobuf:"varint,5,opt,name=status,proto3,enum=circuit.pb.Status,oneof" json:"status,omitempty"` } -func (m *HopMessage) GetLimit() *Limit { - if m != nil { - return m.Limit +func (x *HopMessage) Reset() { + *x = HopMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_circuit_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *HopMessage) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status - } - return Status_OK +func (x *HopMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -type StopMessage struct { - Type *StopMessage_Type `protobuf:"varint,1,req,name=type,enum=circuit.pb.StopMessage_Type" json:"type,omitempty"` - Peer *Peer `protobuf:"bytes,2,opt,name=peer" json:"peer,omitempty"` - Limit *Limit `protobuf:"bytes,3,opt,name=limit" json:"limit,omitempty"` - Status *Status `protobuf:"varint,4,opt,name=status,enum=circuit.pb.Status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopMessage) Reset() { *m = StopMessage{} } -func (m *StopMessage) String() string { return proto.CompactTextString(m) } -func (*StopMessage) ProtoMessage() {} -func (*StopMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{1} -} -func (m *StopMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StopMessage) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StopMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*HopMessage) ProtoMessage() {} + +func (x *HopMessage) ProtoReflect() protoreflect.Message { + mi := &file_pb_circuit_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *StopMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopMessage.Merge(m, src) -} -func (m *StopMessage) XXX_Size() int { - return m.Size() -} -func (m *StopMessage) XXX_DiscardUnknown() { - xxx_messageInfo_StopMessage.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_StopMessage proto.InternalMessageInfo +// Deprecated: Use HopMessage.ProtoReflect.Descriptor instead. +func (*HopMessage) Descriptor() ([]byte, []int) { + return file_pb_circuit_proto_rawDescGZIP(), []int{0} +} -func (m *StopMessage) GetType() StopMessage_Type { - if m != nil && m.Type != nil { - return *m.Type +func (x *HopMessage) GetType() HopMessage_Type { + if x != nil && x.Type != nil { + return *x.Type } - return StopMessage_CONNECT + return HopMessage_RESERVE } -func (m *StopMessage) GetPeer() *Peer { - if m != nil { - return m.Peer +func (x *HopMessage) GetPeer() *Peer { + if x != nil { + return x.Peer } return nil } -func (m *StopMessage) GetLimit() *Limit { - if m != nil { - return m.Limit +func (x *HopMessage) GetReservation() *Reservation { + if x != nil { + return x.Reservation } return nil } -func (m *StopMessage) GetStatus() Status { - if m != nil && m.Status != nil { - return *m.Status +func (x *HopMessage) GetLimit() *Limit { + if x != nil { + return x.Limit } - return Status_OK -} - -type Peer struct { - Id []byte `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` - Addrs [][]byte 
`protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + return nil } -func (m *Peer) Reset() { *m = Peer{} } -func (m *Peer) String() string { return proto.CompactTextString(m) } -func (*Peer) ProtoMessage() {} -func (*Peer) Descriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{2} -} -func (m *Peer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Peer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *HopMessage) GetStatus() Status { + if x != nil && x.Status != nil { + return *x.Status } -} -func (m *Peer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Peer.Merge(m, src) -} -func (m *Peer) XXX_Size() int { - return m.Size() -} -func (m *Peer) XXX_DiscardUnknown() { - xxx_messageInfo_Peer.DiscardUnknown(m) + return Status_UNUSED } -var xxx_messageInfo_Peer proto.InternalMessageInfo +type StopMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Peer) GetId() []byte { - if m != nil { - return m.Id - } - return nil + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. 
+ Type *StopMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=circuit.pb.StopMessage_Type,oneof" json:"type,omitempty"` + Peer *Peer `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"` + Limit *Limit `protobuf:"bytes,3,opt,name=limit,proto3,oneof" json:"limit,omitempty"` + Status *Status `protobuf:"varint,4,opt,name=status,proto3,enum=circuit.pb.Status,oneof" json:"status,omitempty"` } -func (m *Peer) GetAddrs() [][]byte { - if m != nil { - return m.Addrs +func (x *StopMessage) Reset() { + *x = StopMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_circuit_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -type Reservation struct { - Expire *uint64 `protobuf:"varint,1,req,name=expire" json:"expire,omitempty"` - Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs" json:"addrs,omitempty"` - Voucher []byte `protobuf:"bytes,3,opt,name=voucher" json:"voucher,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *StopMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Reservation) Reset() { *m = Reservation{} } -func (m *Reservation) String() string { return proto.CompactTextString(m) } -func (*Reservation) ProtoMessage() {} -func (*Reservation) Descriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{3} -} -func (m *Reservation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Reservation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Reservation.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*StopMessage) ProtoMessage() {} + +func (x *StopMessage) ProtoReflect() protoreflect.Message { + mi := &file_pb_circuit_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *Reservation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reservation.Merge(m, src) -} -func (m *Reservation) XXX_Size() int { - return m.Size() -} -func (m *Reservation) XXX_DiscardUnknown() { - xxx_messageInfo_Reservation.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_Reservation proto.InternalMessageInfo +// Deprecated: Use StopMessage.ProtoReflect.Descriptor instead. +func (*StopMessage) Descriptor() ([]byte, []int) { + return file_pb_circuit_proto_rawDescGZIP(), []int{1} +} -func (m *Reservation) GetExpire() uint64 { - if m != nil && m.Expire != nil { - return *m.Expire +func (x *StopMessage) GetType() StopMessage_Type { + if x != nil && x.Type != nil { + return *x.Type } - return 0 + return StopMessage_CONNECT } -func (m *Reservation) GetAddrs() [][]byte { - if m != nil { - return m.Addrs +func (x *StopMessage) GetPeer() *Peer { + if x != nil { + return x.Peer } return nil } -func (m *Reservation) GetVoucher() []byte { - if m != nil { - return m.Voucher +func (x *StopMessage) GetLimit() *Limit { + if x != nil { + return x.Limit } return nil } -type Limit struct { - Duration *uint32 `protobuf:"varint,1,opt,name=duration" json:"duration,omitempty"` - Data *uint64 `protobuf:"varint,2,opt,name=data" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Limit) Reset() { *m = Limit{} } -func (m *Limit) String() string { return proto.CompactTextString(m) } -func (*Limit) ProtoMessage() {} -func (*Limit) Descriptor() ([]byte, []int) { - return fileDescriptor_ed01bbc211f15e47, []int{4} -} -func (m *Limit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Limit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_Limit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StopMessage) GetStatus() Status { + if x != nil && x.Status != nil { + return *x.Status } -} -func (m *Limit) XXX_Merge(src proto.Message) { - xxx_messageInfo_Limit.Merge(m, src) -} -func (m *Limit) XXX_Size() int { - return m.Size() -} -func (m *Limit) XXX_DiscardUnknown() { - xxx_messageInfo_Limit.DiscardUnknown(m) + return Status_UNUSED } -var xxx_messageInfo_Limit proto.InternalMessageInfo +type Peer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Limit) GetDuration() uint32 { - if m != nil && m.Duration != nil { - return *m.Duration - } - return 0 + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. + Id []byte `protobuf:"bytes,1,opt,name=id,proto3,oneof" json:"id,omitempty"` + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` } -func (m *Limit) GetData() uint64 { - if m != nil && m.Data != nil { - return *m.Data +func (x *Peer) Reset() { + *x = Peer{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_circuit_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return 0 } -func init() { - proto.RegisterEnum("circuit.pb.Status", Status_name, Status_value) - proto.RegisterEnum("circuit.pb.HopMessage_Type", HopMessage_Type_name, HopMessage_Type_value) - proto.RegisterEnum("circuit.pb.StopMessage_Type", StopMessage_Type_name, StopMessage_Type_value) - proto.RegisterType((*HopMessage)(nil), "circuit.pb.HopMessage") - proto.RegisterType((*StopMessage)(nil), "circuit.pb.StopMessage") - proto.RegisterType((*Peer)(nil), "circuit.pb.Peer") - proto.RegisterType((*Reservation)(nil), "circuit.pb.Reservation") - proto.RegisterType((*Limit)(nil), "circuit.pb.Limit") -} 
- -func init() { proto.RegisterFile("circuit.proto", fileDescriptor_ed01bbc211f15e47) } - -var fileDescriptor_ed01bbc211f15e47 = []byte{ - // 514 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcf, 0x8a, 0xd3, 0x50, - 0x18, 0xc5, 0xe7, 0xa6, 0x69, 0x47, 0xbe, 0x76, 0x4a, 0xe6, 0x1b, 0x99, 0x06, 0x1d, 0x6a, 0x29, - 0x82, 0x65, 0x90, 0x2a, 0xdd, 0x88, 0xcb, 0xda, 0x7c, 0xd5, 0x60, 0x93, 0x94, 0x7b, 0x53, 0x99, - 0x5d, 0x89, 0xcd, 0x45, 0x03, 0x6a, 0x43, 0x92, 0x0e, 0xce, 0x5b, 0xe8, 0x23, 0xf8, 0x22, 0xae, - 0xc7, 0x3f, 0x0b, 0xf7, 0x6e, 0xa4, 0x4f, 0x22, 0xb9, 0xe9, 0xb4, 0x19, 0x10, 0x14, 0xdc, 0xf5, - 0xdc, 0x73, 0x0e, 0xb7, 0xbf, 0x73, 0x03, 0x07, 0x8b, 0x28, 0x59, 0xac, 0xa2, 0xac, 0x1f, 0x27, - 0xcb, 0x6c, 0x89, 0xb0, 0x95, 0x2f, 0xbb, 0x9f, 0x34, 0x80, 0x67, 0xcb, 0xd8, 0x91, 0x69, 0x1a, - 0xbc, 0x92, 0xf8, 0x00, 0xf4, 0xec, 0x22, 0x96, 0x26, 0xeb, 0x68, 0xbd, 0xe6, 0xe0, 0x76, 0x7f, - 0x97, 0xec, 0xef, 0x52, 0x7d, 0xff, 0x22, 0x96, 0x5c, 0x05, 0xf1, 0x2e, 0xe8, 0xb1, 0x94, 0x89, - 0xa9, 0x75, 0x58, 0xaf, 0x3e, 0x30, 0xca, 0x85, 0xa9, 0x94, 0x09, 0x57, 0x2e, 0x3e, 0x86, 0x7a, - 0x22, 0x53, 0x99, 0x9c, 0x07, 0x59, 0xb4, 0x7c, 0x67, 0x56, 0x54, 0xb8, 0x55, 0x0e, 0xf3, 0x9d, - 0xcd, 0xcb, 0x59, 0xbc, 0x07, 0xd5, 0x37, 0xd1, 0xdb, 0x28, 0x33, 0x75, 0x55, 0x3a, 0x2c, 0x97, - 0x26, 0xb9, 0xc1, 0x0b, 0x1f, 0x4f, 0xa1, 0x96, 0x66, 0x41, 0xb6, 0x4a, 0xcd, 0x6a, 0x87, 0xf5, - 0x9a, 0x03, 0x2c, 0x27, 0x85, 0x72, 0xf8, 0x26, 0xd1, 0xbd, 0x0f, 0x7a, 0xce, 0x80, 0x75, 0xd8, - 0xe7, 0x24, 0x88, 0xbf, 0x20, 0x63, 0x2f, 0x17, 0x23, 0xcf, 0x75, 0x69, 0xe4, 0x1b, 0x0c, 0x01, - 0x6a, 0xc2, 0x1f, 0xfa, 0x33, 0x61, 0x68, 0xdd, 0x9f, 0x0c, 0xea, 0x22, 0xdb, 0x8d, 0xf4, 0xf0, - 0xda, 0x48, 0x27, 0xd7, 0xef, 0xf9, 0x8f, 0x95, 0xb6, 0xa8, 0x95, 0x7f, 0x46, 0xd5, 0xff, 0x8a, - 0x7a, 0x67, 0x87, 0x7a, 0x45, 0xb7, 0x57, 0xa2, 0x63, 0xf9, 0x16, 0xf9, 0x7f, 0xc0, 0x26, 0x68, - 0x51, 0xa8, 0x98, 0x1a, 0x5c, 0x8b, 0x42, 0xbc, 
0x09, 0xd5, 0x20, 0x0c, 0x93, 0xd4, 0xd4, 0x3a, - 0x95, 0x5e, 0x83, 0x17, 0xa2, 0x3b, 0x83, 0x7a, 0xe9, 0xa9, 0xf0, 0x18, 0x6a, 0xf2, 0x7d, 0x1c, - 0x25, 0xc5, 0x18, 0x3a, 0xdf, 0xa8, 0x3f, 0x97, 0xd1, 0x84, 0xfd, 0xf3, 0xe5, 0x6a, 0xf1, 0x5a, - 0x26, 0x0a, 0xb1, 0xc1, 0xaf, 0x64, 0xf7, 0x11, 0x54, 0x15, 0x21, 0xde, 0x82, 0x1b, 0xe1, 0x2a, - 0x29, 0x3e, 0x13, 0xd6, 0x61, 0xbd, 0x03, 0xbe, 0xd5, 0x88, 0xa0, 0x87, 0x41, 0x16, 0xa8, 0x15, - 0x75, 0xae, 0x7e, 0x9f, 0x7e, 0x66, 0x50, 0x2b, 0x88, 0xb1, 0x06, 0x9a, 0xf7, 0xdc, 0x08, 0xd1, - 0x84, 0xa3, 0xe2, 0x51, 0x87, 0xbe, 0xed, 0xb9, 0x73, 0x4e, 0xe3, 0x99, 0x20, 0xcb, 0xb8, 0x64, - 0x78, 0x02, 0x2d, 0x4e, 0xc2, 0x9b, 0xf1, 0x11, 0xcd, 0x27, 0xb6, 0x63, 0xfb, 0x73, 0x3a, 0x1b, - 0x11, 0x59, 0x64, 0x19, 0x5f, 0x18, 0x1e, 0xc3, 0xe1, 0x94, 0xb8, 0x63, 0x0b, 0x91, 0xd7, 0x2c, - 0x72, 0x6d, 0xb2, 0x8c, 0xaf, 0xea, 0x7c, 0xb3, 0x5c, 0x7e, 0x3e, 0x1e, 0xda, 0x13, 0xb2, 0x8c, - 0x6f, 0x0c, 0x8f, 0xa0, 0xe9, 0x7a, 0xf3, 0xd2, 0x55, 0xc6, 0x77, 0x15, 0x76, 0x86, 0x93, 0xb1, - 0xc7, 0x1d, 0xb2, 0xe6, 0x0e, 0x09, 0x31, 0x7c, 0x4a, 0xc6, 0x87, 0x0a, 0xb6, 0x00, 0x67, 0x2e, - 0x9d, 0x4d, 0x69, 0xe4, 0x97, 0x8c, 0x8f, 0x95, 0x27, 0x8d, 0xcb, 0x75, 0x9b, 0xfd, 0x58, 0xb7, - 0xd9, 0xaf, 0x75, 0x9b, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x63, 0x19, 0x1e, 0x6c, 0xaa, 0x03, - 0x00, 0x00, -} - -func (m *HopMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *Peer) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *HopMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*Peer) ProtoMessage() {} -func (m *HopMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if m.Status != nil { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Status)) - i-- - dAtA[i] = 0x28 - } - if m.Limit != nil { - { - size, err := m.Limit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Reservation != nil { - { - size, err := m.Reservation.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuit(dAtA, i, uint64(size)) +func (x *Peer) ProtoReflect() protoreflect.Message { + mi := &file_pb_circuit_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0x1a + return ms } - if m.Peer != nil { - { - size, err := m.Peer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Type == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } else { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *StopMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StopMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Peer.ProtoReflect.Descriptor instead. 
+func (*Peer) Descriptor() ([]byte, []int) { + return file_pb_circuit_proto_rawDescGZIP(), []int{2} } -func (m *StopMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Status != nil { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Status)) - i-- - dAtA[i] = 0x20 - } - if m.Limit != nil { - { - size, err := m.Limit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Peer != nil { - { - size, err := m.Peer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCircuit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Type == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } else { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Type)) - i-- - dAtA[i] = 0x8 +func (x *Peer) GetId() []byte { + if x != nil { + return x.Id } - return len(dAtA) - i, nil + return nil } -func (m *Peer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Peer) GetAddrs() [][]byte { + if x != nil { + return x.Addrs } - return dAtA[:n], nil + return nil } -func (m *Peer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type Reservation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Peer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= 
len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarintCircuit(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Id == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") - } else { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintCircuit(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. + Expire *uint64 `protobuf:"varint,1,opt,name=expire,proto3,oneof" json:"expire,omitempty"` // Unix expiration time (UTC) + Addrs [][]byte `protobuf:"bytes,2,rep,name=addrs,proto3" json:"addrs,omitempty"` // relay addrs for reserving peer + Voucher []byte `protobuf:"bytes,3,opt,name=voucher,proto3,oneof" json:"voucher,omitempty"` // reservation voucher } -func (m *Reservation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Reservation) Reset() { + *x = Reservation{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_circuit_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *Reservation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *Reservation) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Reservation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Voucher != nil { - i -= len(m.Voucher) - copy(dAtA[i:], m.Voucher) - i = encodeVarintCircuit(dAtA, i, uint64(len(m.Voucher))) - i-- - dAtA[i] = 0x1a - } - if len(m.Addrs) > 0 { - for iNdEx := len(m.Addrs) - 1; iNdEx >= 0; iNdEx-- { - i -= 
len(m.Addrs[iNdEx]) - copy(dAtA[i:], m.Addrs[iNdEx]) - i = encodeVarintCircuit(dAtA, i, uint64(len(m.Addrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Expire == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("expire") - } else { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Expire)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} +func (*Reservation) ProtoMessage() {} -func (m *Limit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Reservation) ProtoReflect() protoreflect.Message { + mi := &file_pb_circuit_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Limit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Reservation.ProtoReflect.Descriptor instead. 
+func (*Reservation) Descriptor() ([]byte, []int) { + return file_pb_circuit_proto_rawDescGZIP(), []int{3} } -func (m *Limit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *Reservation) GetExpire() uint64 { + if x != nil && x.Expire != nil { + return *x.Expire } - if m.Data != nil { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Data)) - i-- - dAtA[i] = 0x10 - } - if m.Duration != nil { - i = encodeVarintCircuit(dAtA, i, uint64(*m.Duration)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return 0 } -func encodeVarintCircuit(dAtA []byte, offset int, v uint64) int { - offset -= sovCircuit(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *HopMessage) Size() (n int) { - if m == nil { - return 0 +func (x *Reservation) GetAddrs() [][]byte { + if x != nil { + return x.Addrs } - var l int - _ = l - if m.Type != nil { - n += 1 + sovCircuit(uint64(*m.Type)) - } - if m.Peer != nil { - l = m.Peer.Size() - n += 1 + l + sovCircuit(uint64(l)) - } - if m.Reservation != nil { - l = m.Reservation.Size() - n += 1 + l + sovCircuit(uint64(l)) - } - if m.Limit != nil { - l = m.Limit.Size() - n += 1 + l + sovCircuit(uint64(l)) - } - if m.Status != nil { - n += 1 + sovCircuit(uint64(*m.Status)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return nil } -func (m *StopMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != nil { - n += 1 + sovCircuit(uint64(*m.Type)) - } - if m.Peer != nil { - l = m.Peer.Size() - n += 1 + l + sovCircuit(uint64(l)) - } - if m.Limit != nil { - l = m.Limit.Size() - n += 1 + l + sovCircuit(uint64(l)) +func (x *Reservation) GetVoucher() []byte { + if x != nil { + return x.Voucher } - if m.Status != nil { - n += 1 + 
sovCircuit(uint64(*m.Status)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return nil } -func (m *Peer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Id != nil { - l = len(m.Id) - n += 1 + l + sovCircuit(uint64(l)) - } - if len(m.Addrs) > 0 { - for _, b := range m.Addrs { - l = len(b) - n += 1 + l + sovCircuit(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +type Limit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Reservation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Expire != nil { - n += 1 + sovCircuit(uint64(*m.Expire)) - } - if len(m.Addrs) > 0 { - for _, b := range m.Addrs { - l = len(b) - n += 1 + l + sovCircuit(uint64(l)) - } - } - if m.Voucher != nil { - l = len(m.Voucher) - n += 1 + l + sovCircuit(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + Duration *uint32 `protobuf:"varint,1,opt,name=duration,proto3,oneof" json:"duration,omitempty"` // seconds + Data *uint64 `protobuf:"varint,2,opt,name=data,proto3,oneof" json:"data,omitempty"` // bytes } -func (m *Limit) Size() (n int) { - if m == nil { - return 0 +func (x *Limit) Reset() { + *x = Limit{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_circuit_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - if m.Duration != nil { - n += 1 + sovCircuit(uint64(*m.Duration)) - } - if m.Data != nil { - n += 1 + sovCircuit(uint64(*m.Data)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n } -func sovCircuit(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (x *Limit) String() string { + return protoimpl.X.MessageStringOf(x) } -func sozCircuit(x uint64) (n int) { - return sovCircuit(uint64((x << 1) ^ uint64((int64(x) >> 
63)))) -} -func (m *HopMessage) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HopMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HopMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var v HopMessage_Type - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= HopMessage_Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Type = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Peer == nil { - m.Peer = &Peer{} - } - if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reservation", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Reservation == nil { - m.Reservation = &Reservation{} - } - if err := m.Reservation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Limit == nil { - m.Limit = &Limit{} - } - if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var v Status - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Status = &v - default: - iNdEx = preIndex - skippy, err := skipCircuit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCircuit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StopMessage) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StopMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StopMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var v StopMessage_Type - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= StopMessage_Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Type = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Peer == nil { - m.Peer = &Peer{} - } - if err := 
m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Limit == nil { - m.Limit = &Limit{} - } - if err := m.Limit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var v Status - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Status = &v - default: - iNdEx = preIndex - skippy, err := skipCircuit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCircuit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } +func (*Limit) ProtoMessage() {} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Peer) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Peer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) 
- if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) - copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCircuit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCircuit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy +func (x *Limit) ProtoReflect() protoreflect.Message { + mi := &file_pb_circuit_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("id") - } + return mi.MessageOf(x) +} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +// Deprecated: Use Limit.ProtoReflect.Descriptor instead. 
+func (*Limit) Descriptor() ([]byte, []int) { + return file_pb_circuit_proto_rawDescGZIP(), []int{4} } -func (m *Reservation) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Reservation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Reservation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Expire", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Expire = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs, make([]byte, postIndex-iNdEx)) - copy(m.Addrs[len(m.Addrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field Voucher", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCircuit - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCircuit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Voucher = append(m.Voucher[:0], dAtA[iNdEx:postIndex]...) - if m.Voucher == nil { - m.Voucher = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCircuit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCircuit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("expire") - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Limit) GetDuration() uint32 { + if x != nil && x.Duration != nil { + return *x.Duration } - return nil + return 0 } -func (m *Limit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Limit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Limit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Duration", wireType) - } - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Duration = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCircuit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Data = &v - default: - iNdEx = preIndex - skippy, err := skipCircuit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCircuit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Limit) GetData() uint64 { + if x != nil && x.Data != nil { + return *x.Data } - return nil + return 0 } -func skipCircuit(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCircuit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCircuit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCircuit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCircuit - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCircuit - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCircuit - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_pb_circuit_proto protoreflect.FileDescriptor + +var file_pb_circuit_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x22, 0xf1, + 0x02, 0x0a, 0x0a, 0x48, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, 0x69, + 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, + 0x65, 0x65, 0x72, 0x48, 0x01, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x3e, + 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x02, 0x52, 0x0b, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2c, + 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74, + 0x48, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, + 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x48, 0x04, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x88, 0x01, 0x01, 0x22, 0x2c, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x01, 0x12, + 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x10, 0x02, 0x42, 0x07, 0x0a, 0x05, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x0e, 0x0a, + 0x0c, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 
0x0a, + 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x22, 0x96, 0x02, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1c, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x74, + 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x65, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x48, 0x01, 0x52, 0x04, 0x70, 0x65, 0x65, + 0x72, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x48, 0x02, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x88, + 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x03, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x88, 0x01, 0x01, 0x22, 0x1f, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x10, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x07, 0x0a, + 0x05, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x38, 0x0a, 0x04, 0x50, + 0x65, 0x65, 0x72, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x00, 0x52, 0x02, 0x69, 0x64, 
0x88, 0x01, 0x01, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x42, 0x05, + 0x0a, 0x03, 0x5f, 0x69, 0x64, 0x22, 0x76, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x06, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x88, 0x01, + 0x01, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x07, 0x76, 0x6f, 0x75, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x07, 0x76, 0x6f, 0x75, 0x63, + 0x68, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x76, 0x6f, 0x75, 0x63, 0x68, 0x65, 0x72, 0x22, 0x57, 0x0a, + 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, + 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x0a, + 0x05, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x2a, 0xca, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x55, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x06, 0x0a, + 0x02, 0x4f, 0x4b, 0x10, 0x64, 0x12, 0x18, 0x0a, 0x13, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x46, 0x55, 0x53, 0x45, 0x44, 0x10, 0xc8, 0x01, 0x12, + 0x1c, 0x0a, 0x17, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, + 0x54, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 
0xc9, 0x01, 0x12, 0x16, 0x0a, + 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45, 0x4e, 0x49, + 0x45, 0x44, 0x10, 0xca, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xcb, 0x01, 0x12, 0x13, 0x0a, + 0x0e, 0x4e, 0x4f, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, + 0xcc, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4d, 0x41, 0x4c, 0x46, 0x4f, 0x52, 0x4d, 0x45, 0x44, 0x5f, + 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x90, 0x03, 0x12, 0x17, 0x0a, 0x12, 0x55, 0x4e, + 0x45, 0x58, 0x50, 0x45, 0x43, 0x54, 0x45, 0x44, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x91, 0x03, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthCircuit = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCircuit = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCircuit = fmt.Errorf("proto: unexpected end of group") + file_pb_circuit_proto_rawDescOnce sync.Once + file_pb_circuit_proto_rawDescData = file_pb_circuit_proto_rawDesc ) + +func file_pb_circuit_proto_rawDescGZIP() []byte { + file_pb_circuit_proto_rawDescOnce.Do(func() { + file_pb_circuit_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_circuit_proto_rawDescData) + }) + return file_pb_circuit_proto_rawDescData +} + +var file_pb_circuit_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_pb_circuit_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_pb_circuit_proto_goTypes = []interface{}{ + (Status)(0), // 0: circuit.pb.Status + (HopMessage_Type)(0), // 1: circuit.pb.HopMessage.Type + (StopMessage_Type)(0), // 2: circuit.pb.StopMessage.Type + (*HopMessage)(nil), // 3: circuit.pb.HopMessage + (*StopMessage)(nil), // 4: circuit.pb.StopMessage + (*Peer)(nil), // 5: circuit.pb.Peer + (*Reservation)(nil), // 6: circuit.pb.Reservation + (*Limit)(nil), // 7: circuit.pb.Limit +} +var 
file_pb_circuit_proto_depIdxs = []int32{ + 1, // 0: circuit.pb.HopMessage.type:type_name -> circuit.pb.HopMessage.Type + 5, // 1: circuit.pb.HopMessage.peer:type_name -> circuit.pb.Peer + 6, // 2: circuit.pb.HopMessage.reservation:type_name -> circuit.pb.Reservation + 7, // 3: circuit.pb.HopMessage.limit:type_name -> circuit.pb.Limit + 0, // 4: circuit.pb.HopMessage.status:type_name -> circuit.pb.Status + 2, // 5: circuit.pb.StopMessage.type:type_name -> circuit.pb.StopMessage.Type + 5, // 6: circuit.pb.StopMessage.peer:type_name -> circuit.pb.Peer + 7, // 7: circuit.pb.StopMessage.limit:type_name -> circuit.pb.Limit + 0, // 8: circuit.pb.StopMessage.status:type_name -> circuit.pb.Status + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_pb_circuit_proto_init() } +func file_pb_circuit_proto_init() { + if File_pb_circuit_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_circuit_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HopMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_circuit_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_circuit_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Peer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_circuit_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reservation); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_circuit_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Limit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_pb_circuit_proto_msgTypes[0].OneofWrappers = []interface{}{} + file_pb_circuit_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_pb_circuit_proto_msgTypes[2].OneofWrappers = []interface{}{} + file_pb_circuit_proto_msgTypes[3].OneofWrappers = []interface{}{} + file_pb_circuit_proto_msgTypes[4].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_circuit_proto_rawDesc, + NumEnums: 3, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_circuit_proto_goTypes, + DependencyIndexes: file_pb_circuit_proto_depIdxs, + EnumInfos: file_pb_circuit_proto_enumTypes, + MessageInfos: file_pb_circuit_proto_msgTypes, + }.Build() + File_pb_circuit_proto = out.File + file_pb_circuit_proto_rawDesc = nil + file_pb_circuit_proto_goTypes = nil + file_pb_circuit_proto_depIdxs = nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto index 370566f4..b9b65fa0 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/circuit.proto @@ -1,4 +1,4 @@ -syntax = "proto2"; +syntax = "proto3"; package circuit.pb; @@ -9,7 +9,9 @@ message HopMessage { STATUS = 2; } - required Type type = 1; + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. 
+ optional Type type = 1; optional Peer peer = 2; optional Reservation reservation = 3; @@ -24,7 +26,9 @@ message StopMessage { STATUS = 1; } - required Type type = 1; + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. + optional Type type = 1; optional Peer peer = 2; optional Limit limit = 3; @@ -33,12 +37,16 @@ message StopMessage { } message Peer { - required bytes id = 1; + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. + optional bytes id = 1; repeated bytes addrs = 2; } message Reservation { - required uint64 expire = 1; // Unix expiration time (UTC) + // This field is marked optional for backwards compatibility with proto2. + // Users should make sure to always set this. + optional uint64 expire = 1; // Unix expiration time (UTC) repeated bytes addrs = 2; // relay addrs for reserving peer optional bytes voucher = 3; // reservation voucher } @@ -49,6 +57,8 @@ message Limit { } enum Status { + // zero value field required for proto3 compatibility + UNUSED = 0; OK = 100; RESERVATION_REFUSED = 200; RESOURCE_LIMIT_EXCEEDED = 201; @@ -57,4 +67,4 @@ enum Status { NO_RESERVATION = 204; MALFORMED_MESSAGE = 400; UNEXPECTED_MESSAGE = 401; -} +} \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go index 6fed0082..917b5370 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.pb.go @@ -1,438 +1,167 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: voucher.proto +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/voucher.proto -package circuit_pb +package pb import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ReservationVoucher struct { - Relay []byte `protobuf:"bytes,1,req,name=relay" json:"relay,omitempty"` - Peer []byte `protobuf:"bytes,2,req,name=peer" json:"peer,omitempty"` - Expiration *uint64 `protobuf:"varint,3,req,name=expiration" json:"expiration,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // These fields are marked optional for backwards compatibility with proto2. + // Users should make sure to always set these. 
+ Relay []byte `protobuf:"bytes,1,opt,name=relay,proto3,oneof" json:"relay,omitempty"` + Peer []byte `protobuf:"bytes,2,opt,name=peer,proto3,oneof" json:"peer,omitempty"` + Expiration *uint64 `protobuf:"varint,3,opt,name=expiration,proto3,oneof" json:"expiration,omitempty"` } -func (m *ReservationVoucher) Reset() { *m = ReservationVoucher{} } -func (m *ReservationVoucher) String() string { return proto.CompactTextString(m) } -func (*ReservationVoucher) ProtoMessage() {} -func (*ReservationVoucher) Descriptor() ([]byte, []int) { - return fileDescriptor_a22a9b0d3335ba25, []int{0} +func (x *ReservationVoucher) Reset() { + *x = ReservationVoucher{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_voucher_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ReservationVoucher) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *ReservationVoucher) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ReservationVoucher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReservationVoucher.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (*ReservationVoucher) ProtoMessage() {} + +func (x *ReservationVoucher) ProtoReflect() protoreflect.Message { + mi := &file_pb_voucher_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *ReservationVoucher) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReservationVoucher.Merge(m, src) -} -func (m *ReservationVoucher) XXX_Size() int { - return m.Size() -} -func (m *ReservationVoucher) XXX_DiscardUnknown() { - xxx_messageInfo_ReservationVoucher.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_ReservationVoucher 
proto.InternalMessageInfo +// Deprecated: Use ReservationVoucher.ProtoReflect.Descriptor instead. +func (*ReservationVoucher) Descriptor() ([]byte, []int) { + return file_pb_voucher_proto_rawDescGZIP(), []int{0} +} -func (m *ReservationVoucher) GetRelay() []byte { - if m != nil { - return m.Relay +func (x *ReservationVoucher) GetRelay() []byte { + if x != nil { + return x.Relay } return nil } -func (m *ReservationVoucher) GetPeer() []byte { - if m != nil { - return m.Peer +func (x *ReservationVoucher) GetPeer() []byte { + if x != nil { + return x.Peer } return nil } -func (m *ReservationVoucher) GetExpiration() uint64 { - if m != nil && m.Expiration != nil { - return *m.Expiration +func (x *ReservationVoucher) GetExpiration() uint64 { + if x != nil && x.Expiration != nil { + return *x.Expiration } return 0 } -func init() { - proto.RegisterType((*ReservationVoucher)(nil), "circuit.pb.ReservationVoucher") -} - -func init() { proto.RegisterFile("voucher.proto", fileDescriptor_a22a9b0d3335ba25) } - -var fileDescriptor_a22a9b0d3335ba25 = []byte{ - // 135 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0xcb, 0x2f, 0x4d, - 0xce, 0x48, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4a, 0xce, 0x2c, 0x4a, 0x2e, - 0xcd, 0x2c, 0xd1, 0x2b, 0x48, 0x52, 0x8a, 0xe3, 0x12, 0x0a, 0x4a, 0x2d, 0x4e, 0x2d, 0x2a, 0x4b, - 0x2c, 0xc9, 0xcc, 0xcf, 0x0b, 0x83, 0xa8, 0x13, 0x12, 0xe1, 0x62, 0x2d, 0x4a, 0xcd, 0x49, 0xac, - 0x94, 0x60, 0x54, 0x60, 0xd2, 0xe0, 0x09, 0x82, 0x70, 0x84, 0x84, 0xb8, 0x58, 0x0a, 0x52, 0x53, - 0x8b, 0x24, 0x98, 0xc0, 0x82, 0x60, 0xb6, 0x90, 0x1c, 0x17, 0x57, 0x6a, 0x45, 0x41, 0x66, 0x11, - 0x58, 0xbb, 0x04, 0xb3, 0x02, 0x93, 0x06, 0x4b, 0x10, 0x92, 0x88, 0x13, 0xcf, 0x89, 0x47, 0x72, - 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x08, 0x08, 0x00, 0x00, 0xff, 0xff, 0xc0, - 0x81, 0x3a, 0xee, 0x89, 0x00, 0x00, 0x00, -} - -func (m *ReservationVoucher) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReservationVoucher) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +var File_pb_voucher_proto protoreflect.FileDescriptor -func (m *ReservationVoucher) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Expiration == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("expiration") - } else { - i = encodeVarintVoucher(dAtA, i, uint64(*m.Expiration)) - i-- - dAtA[i] = 0x18 - } - if m.Peer == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("peer") - } else { - i -= len(m.Peer) - copy(dAtA[i:], m.Peer) - i = encodeVarintVoucher(dAtA, i, uint64(len(m.Peer))) - i-- - dAtA[i] = 0x12 - } - if m.Relay == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("relay") - } else { - i -= len(m.Relay) - copy(dAtA[i:], m.Relay) - i = encodeVarintVoucher(dAtA, i, uint64(len(m.Relay))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +var file_pb_voucher_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x76, 0x6f, 0x75, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0a, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x2e, 0x70, 0x62, 0x22, 0x8f, + 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x6f, + 0x75, 0x63, 0x68, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x05, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x88, 0x01, 0x01, + 0x12, 0x17, 0x0a, 0x04, 0x70, 0x65, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, + 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 
0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, + 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, + 0x0a, 0x06, 0x5f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x70, 0x65, 0x65, + 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func encodeVarintVoucher(dAtA []byte, offset int, v uint64) int { - offset -= sovVoucher(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ReservationVoucher) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Relay != nil { - l = len(m.Relay) - n += 1 + l + sovVoucher(uint64(l)) - } - if m.Peer != nil { - l = len(m.Peer) - n += 1 + l + sovVoucher(uint64(l)) - } - if m.Expiration != nil { - n += 1 + sovVoucher(uint64(*m.Expiration)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovVoucher(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozVoucher(x uint64) (n int) { - return sovVoucher(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ReservationVoucher) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVoucher - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReservationVoucher: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReservationVoucher: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Relay", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVoucher - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthVoucher - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthVoucher - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Relay = append(m.Relay[:0], dAtA[iNdEx:postIndex]...) - if m.Relay == nil { - m.Relay = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVoucher - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthVoucher - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthVoucher - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Peer = append(m.Peer[:0], dAtA[iNdEx:postIndex]...) 
- if m.Peer == nil { - m.Peer = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVoucher - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Expiration = &v - hasFields[0] |= uint64(0x00000004) - default: - iNdEx = preIndex - skippy, err := skipVoucher(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthVoucher - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthVoucher - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("relay") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("peer") - } - if hasFields[0]&uint64(0x00000004) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("expiration") - } +var ( + file_pb_voucher_proto_rawDescOnce sync.Once + file_pb_voucher_proto_rawDescData = file_pb_voucher_proto_rawDesc +) - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipVoucher(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVoucher - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break +func file_pb_voucher_proto_rawDescGZIP() []byte { + file_pb_voucher_proto_rawDescOnce.Do(func() { + file_pb_voucher_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_pb_voucher_proto_rawDescData) + }) + return file_pb_voucher_proto_rawDescData +} + +var file_pb_voucher_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pb_voucher_proto_goTypes = []interface{}{ + (*ReservationVoucher)(nil), // 0: circuit.pb.ReservationVoucher +} +var file_pb_voucher_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_pb_voucher_proto_init() } +func file_pb_voucher_proto_init() { + if File_pb_voucher_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_voucher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReservationVoucher); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVoucher - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVoucher - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthVoucher - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupVoucher - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthVoucher - } - if depth == 0 { - return iNdEx, nil - } } - return 0, 
io.ErrUnexpectedEOF + file_pb_voucher_proto_msgTypes[0].OneofWrappers = []interface{}{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_voucher_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_voucher_proto_goTypes, + DependencyIndexes: file_pb_voucher_proto_depIdxs, + MessageInfos: file_pb_voucher_proto_msgTypes, + }.Build() + File_pb_voucher_proto = out.File + file_pb_voucher_proto_rawDesc = nil + file_pb_voucher_proto_goTypes = nil + file_pb_voucher_proto_depIdxs = nil } - -var ( - ErrInvalidLengthVoucher = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowVoucher = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupVoucher = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto index 08644025..1e2e7963 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb/voucher.proto @@ -1,9 +1,11 @@ -syntax = "proto2"; +syntax = "proto3"; package circuit.pb; message ReservationVoucher { - required bytes relay = 1; - required bytes peer = 2; - required uint64 expiration = 3; -} + // These fields are marked optional for backwards compatibility with proto2. + // Users should make sure to always set these. 
+ optional bytes relay = 1; + optional bytes peer = 2; + optional uint64 expiration = 3; +} \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go index d27fc509..4b6d96b8 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/protocol.go @@ -1,7 +1,6 @@ package proto const ( - ProtoIDv1 = "/libp2p/circuit/relay/0.1.0" ProtoIDv2Hop = "/libp2p/circuit/relay/0.2.0/hop" ProtoIDv2Stop = "/libp2p/circuit/relay/0.2.0/stop" ) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go index fd50fccc..7114d81c 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto/voucher.go @@ -6,6 +6,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/record" pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb" + + "google.golang.org/protobuf/proto" ) const RecordDomain = "libp2p-relay-rsvp" @@ -37,21 +39,17 @@ func (rv *ReservationVoucher) Codec() []byte { } func (rv *ReservationVoucher) MarshalRecord() ([]byte, error) { - relay := []byte(rv.Relay) - peer := []byte(rv.Peer) expiration := uint64(rv.Expiration.Unix()) - pbrv := &pbv2.ReservationVoucher{ - Relay: relay, - Peer: peer, + return proto.Marshal(&pbv2.ReservationVoucher{ + Relay: []byte(rv.Relay), + Peer: []byte(rv.Peer), Expiration: &expiration, - } - - return pbrv.Marshal() + }) } func (rv *ReservationVoucher) UnmarshalRecord(blob []byte) error { pbrv := pbv2.ReservationVoucher{} - err := pbrv.Unmarshal(blob) + err := proto.Unmarshal(blob, &pbrv) if err != nil { return err } diff --git 
a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go new file mode 100644 index 00000000..77864591 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/metrics.go @@ -0,0 +1,268 @@ +package relay + +import ( + "time" + + "github.com/libp2p/go-libp2p/p2p/metricshelper" + pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb" + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "libp2p_relaysvc" + +var ( + status = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "status", + Help: "Relay Status", + }, + ) + + reservationsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "reservations_total", + Help: "Relay Reservation Request", + }, + []string{"type"}, + ) + reservationRequestResponseStatusTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "reservation_request_response_status_total", + Help: "Relay Reservation Request Response Status", + }, + []string{"status"}, + ) + reservationRejectionsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "reservation_rejections_total", + Help: "Relay Reservation Rejected Reason", + }, + []string{"reason"}, + ) + + connectionsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connections_total", + Help: "Relay Connection Total", + }, + []string{"type"}, + ) + connectionRequestResponseStatusTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connection_request_response_status_total", + Help: "Relay Connection Request Status", + }, + []string{"status"}, + ) + connectionRejectionsTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "connection_rejections_total", + 
Help: "Relay Connection Rejected Reason", + }, + []string{"reason"}, + ) + connectionDurationSeconds = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "connection_duration_seconds", + Help: "Relay Connection Duration", + }, + ) + + dataTransferredBytesTotal = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "data_transferred_bytes_total", + Help: "Bytes Transferred Total", + }, + ) + + collectors = []prometheus.Collector{ + status, + reservationsTotal, + reservationRequestResponseStatusTotal, + reservationRejectionsTotal, + connectionsTotal, + connectionRequestResponseStatusTotal, + connectionRejectionsTotal, + connectionDurationSeconds, + dataTransferredBytesTotal, + } +) + +const ( + requestStatusOK = "ok" + requestStatusRejected = "rejected" + requestStatusError = "error" +) + +// MetricsTracer is the interface for tracking metrics for relay service +type MetricsTracer interface { + // RelayStatus tracks whether the service is currently active + RelayStatus(enabled bool) + + // ConnectionOpened tracks metrics on opening a relay connection + ConnectionOpened() + // ConnectionClosed tracks metrics on closing a relay connection + ConnectionClosed(d time.Duration) + // ConnectionRequestHandled tracks metrics on handling a relay connection request + ConnectionRequestHandled(status pbv2.Status) + + // ReservationAllowed tracks metrics on opening or renewing a relay reservation + ReservationAllowed(isRenewal bool) + // ReservationRequestClosed tracks metrics on closing a relay reservation + ReservationClosed(cnt int) + // ReservationRequestHandled tracks metrics on handling a relay reservation request + ReservationRequestHandled(status pbv2.Status) + + // BytesTransferred tracks the total bytes transferred by the relay service + BytesTransferred(cnt int) +} + +type metricsTracer struct{} + +var _ MetricsTracer = &metricsTracer{} + +type metricsTracerSetting struct { + reg 
prometheus.Registerer +} + +type MetricsTracerOption func(*metricsTracerSetting) + +func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption { + return func(s *metricsTracerSetting) { + if reg != nil { + s.reg = reg + } + } +} + +func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { + setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer} + for _, opt := range opts { + opt(setting) + } + metricshelper.RegisterCollectors(setting.reg, collectors...) + return &metricsTracer{} +} + +func (mt *metricsTracer) RelayStatus(enabled bool) { + if enabled { + status.Set(1) + } else { + status.Set(0) + } +} + +func (mt *metricsTracer) ConnectionOpened() { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, "opened") + + connectionsTotal.WithLabelValues(*tags...).Add(1) +} + +func (mt *metricsTracer) ConnectionClosed(d time.Duration) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, "closed") + + connectionsTotal.WithLabelValues(*tags...).Add(1) + connectionDurationSeconds.Observe(d.Seconds()) +} + +func (mt *metricsTracer) ConnectionRequestHandled(status pbv2.Status) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + respStatus := getResponseStatus(status) + + *tags = append(*tags, respStatus) + connectionRequestResponseStatusTotal.WithLabelValues(*tags...).Add(1) + if respStatus == requestStatusRejected { + *tags = (*tags)[:0] + *tags = append(*tags, getRejectionReason(status)) + connectionRejectionsTotal.WithLabelValues(*tags...).Add(1) + } +} + +func (mt *metricsTracer) ReservationAllowed(isRenewal bool) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + if isRenewal { + *tags = append(*tags, "renewed") + } else { + *tags = append(*tags, "opened") + } + + reservationsTotal.WithLabelValues(*tags...).Add(1) +} + +func (mt *metricsTracer) 
ReservationClosed(cnt int) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + *tags = append(*tags, "closed") + + reservationsTotal.WithLabelValues(*tags...).Add(float64(cnt)) +} + +func (mt *metricsTracer) ReservationRequestHandled(status pbv2.Status) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + respStatus := getResponseStatus(status) + + *tags = append(*tags, respStatus) + reservationRequestResponseStatusTotal.WithLabelValues(*tags...).Add(1) + if respStatus == requestStatusRejected { + *tags = (*tags)[:0] + *tags = append(*tags, getRejectionReason(status)) + reservationRejectionsTotal.WithLabelValues(*tags...).Add(1) + } +} + +func (mt *metricsTracer) BytesTransferred(cnt int) { + dataTransferredBytesTotal.Add(float64(cnt)) +} + +func getResponseStatus(status pbv2.Status) string { + responseStatus := "unknown" + switch status { + case pbv2.Status_RESERVATION_REFUSED, + pbv2.Status_RESOURCE_LIMIT_EXCEEDED, + pbv2.Status_PERMISSION_DENIED, + pbv2.Status_NO_RESERVATION, + pbv2.Status_MALFORMED_MESSAGE: + + responseStatus = requestStatusRejected + case pbv2.Status_UNEXPECTED_MESSAGE, pbv2.Status_CONNECTION_FAILED: + responseStatus = requestStatusError + case pbv2.Status_OK: + responseStatus = requestStatusOK + } + return responseStatus +} + +func getRejectionReason(status pbv2.Status) string { + reason := "unknown" + switch status { + case pbv2.Status_RESERVATION_REFUSED: + reason = "ip constraint violation" + case pbv2.Status_RESOURCE_LIMIT_EXCEEDED: + reason = "resource limit exceeded" + case pbv2.Status_PERMISSION_DENIED: + reason = "permission denied" + case pbv2.Status_NO_RESERVATION: + reason = "no reservation" + case pbv2.Status_MALFORMED_MESSAGE: + reason = "malformed message" + } + return reason +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go index 
34641152..3b50ec38 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/options.go @@ -18,6 +18,14 @@ func WithLimit(limit *RelayLimit) Option { } } +// WithInfiniteLimits is a Relay option that disables limits. +func WithInfiniteLimits() Option { + return func(r *Relay) error { + r.rc.Limit = nil + return nil + } +} + // WithACL is a Relay option that supplies an ACLFilter for access control. func WithACL(acl ACLFilter) Option { return func(r *Relay) error { @@ -25,3 +33,11 @@ func WithACL(acl ACLFilter) Option { return nil } } + +// WithMetricsTracer is a Relay option that supplies a MetricsTracer for metrics +func WithMetricsTracer(mt MetricsTracer) Option { + return func(r *Relay) error { + r.metricsTracer = mt + return nil + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go index 5e2d9f18..e3c8f475 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/relay/relay.go @@ -2,6 +2,7 @@ package relay import ( "context" + "errors" "fmt" "io" "sync" @@ -18,6 +19,7 @@ import ( logging "github.com/ipfs/go-log/v2" pool "github.com/libp2p/go-buffer-pool" + asnutil "github.com/libp2p/go-libp2p-asn-util" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) @@ -41,7 +43,6 @@ var log = logging.Logger("relay") // Relay is the (limited) relay service object. 
type Relay struct { - closed uint32 ctx context.Context cancel func() @@ -50,12 +51,16 @@ type Relay struct { acl ACLFilter constraints *constraints scope network.ResourceScopeSpan + notifiee network.Notifiee - mx sync.Mutex - rsvp map[peer.ID]time.Time - conns map[peer.ID]int + mx sync.Mutex + rsvp map[peer.ID]time.Time + conns map[peer.ID]int + closed bool selfAddr ma.Multiaddr + + metricsTracer MetricsTracer } // New constructs a new limited relay that can provide relay services in the given host. @@ -94,26 +99,34 @@ func New(h host.Host, opts ...Option) (*Relay, error) { r.selfAddr = ma.StringCast(fmt.Sprintf("/p2p/%s", h.ID())) h.SetStreamHandler(proto.ProtoIDv2Hop, r.handleStream) - h.Network().Notify( - &network.NotifyBundle{ - DisconnectedF: r.disconnected, - }) + r.notifiee = &network.NotifyBundle{DisconnectedF: r.disconnected} + h.Network().Notify(r.notifiee) + + if r.metricsTracer != nil { + r.metricsTracer.RelayStatus(true) + } go r.background() return r, nil } func (r *Relay) Close() error { - if atomic.CompareAndSwapUint32(&r.closed, 0, 1) { + r.mx.Lock() + if !r.closed { + r.closed = true + r.mx.Unlock() + r.host.RemoveStreamHandler(proto.ProtoIDv2Hop) + r.host.Network().StopNotify(r.notifiee) r.scope.Done() r.cancel() - r.mx.Lock() - for p := range r.rsvp { - r.host.ConnManager().UntagPeer(p, "relay-reservation") + r.gc() + if r.metricsTracer != nil { + r.metricsTracer.RelayStatus(false) } - r.mx.Unlock() + return nil } + r.mx.Unlock() return nil } @@ -147,38 +160,48 @@ func (r *Relay) handleStream(s network.Stream) { } // reset stream deadline as message has been read s.SetReadDeadline(time.Time{}) - switch msg.GetType() { case pbv2.HopMessage_RESERVE: - r.handleReserve(s) - + status := r.handleReserve(s) + if r.metricsTracer != nil { + r.metricsTracer.ReservationRequestHandled(status) + } case pbv2.HopMessage_CONNECT: - r.handleConnect(s, &msg) - + status := r.handleConnect(s, &msg) + if r.metricsTracer != nil { + 
r.metricsTracer.ConnectionRequestHandled(status) + } default: r.handleError(s, pbv2.Status_MALFORMED_MESSAGE) } } -func (r *Relay) handleReserve(s network.Stream) { +func (r *Relay) handleReserve(s network.Stream) pbv2.Status { defer s.Close() - p := s.Conn().RemotePeer() a := s.Conn().RemoteMultiaddr() if isRelayAddr(a) { log.Debugf("refusing relay reservation for %s; reservation attempt over relay connection") r.handleError(s, pbv2.Status_PERMISSION_DENIED) - return + return pbv2.Status_PERMISSION_DENIED } if r.acl != nil && !r.acl.AllowReserve(p, a) { log.Debugf("refusing relay reservation for %s; permission denied", p) r.handleError(s, pbv2.Status_PERMISSION_DENIED) - return + return pbv2.Status_PERMISSION_DENIED } r.mx.Lock() + // Check if relay is still active. Otherwise ConnManager.UnTagPeer will not be called if this block runs after + // Close() call + if r.closed { + r.mx.Unlock() + log.Debugf("refusing relay reservation for %s; relay closed", p) + r.handleError(s, pbv2.Status_PERMISSION_DENIED) + return pbv2.Status_PERMISSION_DENIED + } now := time.Now() _, exists := r.rsvp[p] @@ -187,7 +210,7 @@ func (r *Relay) handleReserve(s network.Stream) { r.mx.Unlock() log.Debugf("refusing relay reservation for %s; IP constraint violation: %s", p, err) r.handleError(s, pbv2.Status_RESERVATION_REFUSED) - return + return pbv2.Status_RESERVATION_REFUSED } } @@ -195,6 +218,9 @@ func (r *Relay) handleReserve(s network.Stream) { r.rsvp[p] = expire r.host.ConnManager().TagPeer(p, "relay-reservation", ReservationTagWeight) r.mx.Unlock() + if r.metricsTracer != nil { + r.metricsTracer.ReservationAllowed(exists) + } log.Debugf("reserving relay slot for %s", p) @@ -204,10 +230,12 @@ func (r *Relay) handleReserve(s network.Stream) { if err := r.writeResponse(s, pbv2.Status_OK, r.makeReservationMsg(p, expire), r.makeLimitMsg(p)); err != nil { log.Debugf("error writing reservation response; retracting reservation for %s", p) s.Reset() + return pbv2.Status_CONNECTION_FAILED } + 
return pbv2.Status_OK } -func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { +func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) pbv2.Status { src := s.Conn().RemotePeer() a := s.Conn().RemoteMultiaddr() @@ -215,7 +243,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { if err != nil { log.Debugf("failed to begin relay transaction: %s", err) r.handleError(s, pbv2.Status_RESOURCE_LIMIT_EXCEEDED) - return + return pbv2.Status_RESOURCE_LIMIT_EXCEEDED } fail := func(status pbv2.Status) { @@ -227,25 +255,25 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { if err := span.ReserveMemory(2*r.rc.BufferSize, network.ReservationPriorityHigh); err != nil { log.Debugf("error reserving memory for relay: %s", err) fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) - return + return pbv2.Status_RESOURCE_LIMIT_EXCEEDED } if isRelayAddr(a) { log.Debugf("refusing connection from %s; connection attempt over relay connection") fail(pbv2.Status_PERMISSION_DENIED) - return + return pbv2.Status_PERMISSION_DENIED } dest, err := util.PeerToPeerInfoV2(msg.GetPeer()) if err != nil { fail(pbv2.Status_MALFORMED_MESSAGE) - return + return pbv2.Status_MALFORMED_MESSAGE } if r.acl != nil && !r.acl.AllowConnect(src, s.Conn().RemoteMultiaddr(), dest.ID) { log.Debugf("refusing connection from %s to %s; permission denied", src, dest.ID) fail(pbv2.Status_PERMISSION_DENIED) - return + return pbv2.Status_PERMISSION_DENIED } r.mx.Lock() @@ -254,7 +282,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { r.mx.Unlock() log.Debugf("refusing connection from %s to %s; no reservation", src, dest.ID) fail(pbv2.Status_NO_RESERVATION) - return + return pbv2.Status_NO_RESERVATION } srcConns := r.conns[src] @@ -262,7 +290,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { r.mx.Unlock() log.Debugf("refusing connection from %s to %s; too many connections from %s", src, dest.ID, src) 
fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) - return + return pbv2.Status_RESOURCE_LIMIT_EXCEEDED } destConns := r.conns[dest.ID] @@ -270,19 +298,27 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { r.mx.Unlock() log.Debugf("refusing connection from %s to %s; too many connecitons to %s", src, dest.ID, dest.ID) fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) - return + return pbv2.Status_RESOURCE_LIMIT_EXCEEDED } r.addConn(src) r.addConn(dest.ID) r.mx.Unlock() + if r.metricsTracer != nil { + r.metricsTracer.ConnectionOpened() + } + connStTime := time.Now() + cleanup := func() { span.Done() r.mx.Lock() r.rmConn(src) r.rmConn(dest.ID) r.mx.Unlock() + if r.metricsTracer != nil { + r.metricsTracer.ConnectionClosed(time.Since(connStTime)) + } } ctx, cancel := context.WithTimeout(r.ctx, ConnectTimeout) @@ -295,7 +331,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { log.Debugf("error opening relay stream to %s: %s", dest.ID, err) cleanup() r.handleError(s, pbv2.Status_CONNECTION_FAILED) - return + return pbv2.Status_CONNECTION_FAILED } fail = func(status pbv2.Status) { @@ -307,14 +343,14 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { if err := bs.Scope().SetService(ServiceName); err != nil { log.Debugf("error attaching stream to relay service: %s", err) fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) - return + return pbv2.Status_RESOURCE_LIMIT_EXCEEDED } // handshake if err := bs.Scope().ReserveMemory(maxMessageSize, network.ReservationPriorityAlways); err != nil { - log.Debugf("erro reserving memory for stream: %s", err) + log.Debugf("error reserving memory for stream: %s", err) fail(pbv2.Status_RESOURCE_LIMIT_EXCEEDED) - return + return pbv2.Status_RESOURCE_LIMIT_EXCEEDED } defer bs.Scope().ReleaseMemory(maxMessageSize) @@ -333,7 +369,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { if err != nil { log.Debugf("error writing stop handshake") fail(pbv2.Status_CONNECTION_FAILED) 
- return + return pbv2.Status_CONNECTION_FAILED } stopmsg.Reset() @@ -342,19 +378,19 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { if err != nil { log.Debugf("error reading stop response: %s", err.Error()) fail(pbv2.Status_CONNECTION_FAILED) - return + return pbv2.Status_CONNECTION_FAILED } if t := stopmsg.GetType(); t != pbv2.StopMessage_STATUS { log.Debugf("unexpected stop response; not a status message (%d)", t) fail(pbv2.Status_CONNECTION_FAILED) - return + return pbv2.Status_CONNECTION_FAILED } if status := stopmsg.GetStatus(); status != pbv2.Status_OK { log.Debugf("relay stop failure: %d", status) fail(pbv2.Status_CONNECTION_FAILED) - return + return pbv2.Status_CONNECTION_FAILED } var response pbv2.HopMessage @@ -369,7 +405,7 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { bs.Reset() s.Reset() cleanup() - return + return pbv2.Status_CONNECTION_FAILED } // reset deadline @@ -377,11 +413,11 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { log.Infof("relaying connection from %s to %s", src, dest.ID) - goroutines := new(int32) - *goroutines = 2 + var goroutines atomic.Int32 + goroutines.Store(2) done := func() { - if atomic.AddInt32(goroutines, -1) == 0 { + if goroutines.Add(-1) == 0 { s.Close() bs.Close() cleanup() @@ -398,6 +434,8 @@ func (r *Relay) handleConnect(s network.Stream, msg *pbv2.HopMessage) { go r.relayUnlimited(s, bs, src, dest.ID, done) go r.relayUnlimited(bs, s, dest.ID, src, done) } + + return pbv2.Status_OK } func (r *Relay) addConn(p peer.ID) { @@ -428,7 +466,7 @@ func (r *Relay) relayLimited(src, dest network.Stream, srcID, destID peer.ID, li limitedSrc := io.LimitReader(src, limit) - count, err := io.CopyBuffer(dest, limitedSrc, buf) + count, err := r.copyWithBuffer(dest, limitedSrc, buf) if err != nil { log.Debugf("relay copy error: %s", err) // Reset both. 
@@ -452,7 +490,7 @@ func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID, buf := pool.Get(r.rc.BufferSize) defer pool.Put(buf) - count, err := io.CopyBuffer(dest, src, buf) + count, err := r.copyWithBuffer(dest, src, buf) if err != nil { log.Debugf("relay copy error: %s", err) // Reset both. @@ -466,6 +504,47 @@ func (r *Relay) relayUnlimited(src, dest network.Stream, srcID, destID peer.ID, log.Debugf("relayed %d bytes from %s to %s", count, srcID, destID) } +// errInvalidWrite means that a write returned an impossible count. +// copied from io.errInvalidWrite +var errInvalidWrite = errors.New("invalid write result") + +// copyWithBuffer copies from src to dst using the provided buf until either EOF is reached +// on src or an error occurs. It reports the number of bytes transferred to metricsTracer. +// The implementation is a modified form of io.CopyBuffer to support metrics tracking. +func (r *Relay) copyWithBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw < 0 || nr < nw { + nw = 0 + if ew == nil { + ew = errInvalidWrite + } + } + written += int64(nw) + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + if r.metricsTracer != nil { + r.metricsTracer.BytesTransferred(nw) + } + } + if er != nil { + if er != io.EOF { + err = er + } + break + } + } + return written, err +} + func (r *Relay) handleError(s network.Stream, status pbv2.Status) { log.Debugf("relay error: %s (%d)", pbv2.Status_name[int32(status)], status) err := r.writeResponse(s, status, nil, nil) @@ -545,6 +624,8 @@ func (r *Relay) makeLimitMsg(p peer.ID) *pbv2.Limit { } func (r *Relay) background() { + asnutil.Store.Init() + ticker := time.NewTicker(time.Minute) defer ticker.Stop() @@ -563,13 +644,17 @@ func (r *Relay) gc() { defer r.mx.Unlock() now := time.Now() - + cnt := 0 for p, expire := range r.rsvp { - if 
expire.Before(now) { + if r.closed || expire.Before(now) { delete(r.rsvp, p) r.host.ConnManager().UntagPeer(p, "relay-reservation") + cnt++ } } + if r.metricsTracer != nil { + r.metricsTracer.ReservationClosed(cnt) + } for p, count := range r.conns { if count == 0 { @@ -585,9 +670,15 @@ func (r *Relay) disconnected(n network.Network, c network.Conn) { } r.mx.Lock() - defer r.mx.Unlock() + _, ok := r.rsvp[p] + if ok { + delete(r.rsvp, p) + } + r.mx.Unlock() - delete(r.rsvp, p) + if ok && r.metricsTracer != nil { + r.metricsTracer.ReservationClosed(1) + } } func isRelayAddr(a ma.Multiaddr) bool { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go index de314b18..21e888d9 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/io.go @@ -5,10 +5,9 @@ import ( "io" pool "github.com/libp2p/go-buffer-pool" - "github.com/libp2p/go-msgio/protoio" - - "github.com/gogo/protobuf/proto" + "github.com/libp2p/go-msgio/pbio" "github.com/multiformats/go-varint" + "google.golang.org/protobuf/proto" ) type DelimitedReader struct { @@ -62,6 +61,6 @@ func (d *DelimitedReader) ReadMsg(msg proto.Message) error { return proto.Unmarshal(buf, msg) } -func NewDelimitedWriter(w io.Writer) protoio.WriteCloser { - return protoio.NewDelimitedWriter(w) +func NewDelimitedWriter(w io.Writer) pbio.WriteCloser { + return pbio.NewDelimitedWriter(w) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go index 4a884351..f5b72bf0 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/util/pbconv.go @@ -4,51 +4,11 @@ import ( "errors" "github.com/libp2p/go-libp2p/core/peer" - pbv1 
"github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb" pbv2 "github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb" ma "github.com/multiformats/go-multiaddr" ) -func PeerToPeerInfoV1(p *pbv1.CircuitRelay_Peer) (peer.AddrInfo, error) { - if p == nil { - return peer.AddrInfo{}, errors.New("nil peer") - } - - id, err := peer.IDFromBytes(p.Id) - if err != nil { - return peer.AddrInfo{}, err - } - - var addrs []ma.Multiaddr - if len(p.Addrs) > 0 { - addrs = make([]ma.Multiaddr, 0, len(p.Addrs)) - } - - for _, addrBytes := range p.Addrs { - a, err := ma.NewMultiaddrBytes(addrBytes) - if err == nil { - addrs = append(addrs, a) - } - } - - return peer.AddrInfo{ID: id, Addrs: addrs}, nil -} - -func PeerInfoToPeerV1(pi peer.AddrInfo) *pbv1.CircuitRelay_Peer { - addrs := make([][]byte, 0, len(pi.Addrs)) - - for _, addr := range pi.Addrs { - addrs = append(addrs, addr.Bytes()) - } - - p := new(pbv1.CircuitRelay_Peer) - p.Id = []byte(pi.ID) - p.Addrs = addrs - - return p -} - func PeerToPeerInfoV2(p *pbv2.Peer) (peer.AddrInfo, error) { if p == nil { return peer.AddrInfo{}, errors.New("nil peer") @@ -73,14 +33,12 @@ func PeerToPeerInfoV2(p *pbv2.Peer) (peer.AddrInfo, error) { func PeerInfoToPeerV2(pi peer.AddrInfo) *pbv2.Peer { addrs := make([][]byte, 0, len(pi.Addrs)) - for _, addr := range pi.Addrs { addrs = append(addrs, addr.Bytes()) } - p := new(pbv2.Peer) - p.Id = []byte(pi.ID) - p.Addrs = addrs - - return p + return &pbv2.Peer{ + Id: []byte(pi.ID), + Addrs: addrs, + } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go index 5cf8f8fd..49c39f58 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/holepuncher.go @@ -10,15 +10,15 @@ import ( "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - pb 
"github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb" + "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb" "github.com/libp2p/go-libp2p/p2p/protocol/identify" - - "github.com/libp2p/go-msgio/protoio" - + "github.com/libp2p/go-msgio/pbio" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" ) +//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/holepunch.proto=./pb pb/holepunch.proto + // ErrHolePunchActive is returned from DirectConnect when another hole punching attempt is currently running var ErrHolePunchActive = errors.New("another hole punching attempt to this peer is active") @@ -200,8 +200,8 @@ func (hp *holePuncher) initiateHolePunchImpl(str network.Stream) ([]ma.Multiaddr } defer str.Scope().ReleaseMemory(maxMsgSize) - w := protoio.NewDelimitedWriter(str) - rd := protoio.NewDelimitedReader(str, maxMsgSize) + w := pbio.NewDelimitedWriter(str) + rd := pbio.NewDelimitedReader(str, maxMsgSize) str.SetDeadline(time.Now().Add(StreamTimeout)) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile deleted file mode 100644 index eb14b576..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go index 3d7e21ac..ca568580 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb/holepunch.pb.go @@ -1,27 +1,24 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: holepunch.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/holepunch.proto -package holepunch_pb +package pb import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type HolePunch_Type int32 @@ -30,15 +27,17 @@ const ( HolePunch_SYNC HolePunch_Type = 300 ) -var HolePunch_Type_name = map[int32]string{ - 100: "CONNECT", - 300: "SYNC", -} - -var HolePunch_Type_value = map[string]int32{ - "CONNECT": 100, - "SYNC": 300, -} +// Enum value maps for HolePunch_Type. 
+var ( + HolePunch_Type_name = map[int32]string{ + 100: "CONNECT", + 300: "SYNC", + } + HolePunch_Type_value = map[string]int32{ + "CONNECT": 100, + "SYNC": 300, + } +) func (x HolePunch_Type) Enum() *HolePunch_Type { p := new(HolePunch_Type) @@ -47,369 +46,170 @@ func (x HolePunch_Type) Enum() *HolePunch_Type { } func (x HolePunch_Type) String() string { - return proto.EnumName(HolePunch_Type_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HolePunch_Type) Descriptor() protoreflect.EnumDescriptor { + return file_pb_holepunch_proto_enumTypes[0].Descriptor() +} + +func (HolePunch_Type) Type() protoreflect.EnumType { + return &file_pb_holepunch_proto_enumTypes[0] } -func (x *HolePunch_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HolePunch_Type_value, data, "HolePunch_Type") +func (x HolePunch_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *HolePunch_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = HolePunch_Type(value) + *x = HolePunch_Type(num) return nil } +// Deprecated: Use HolePunch_Type.Descriptor instead. 
func (HolePunch_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_290ddea0f23ef64a, []int{0, 0} + return file_pb_holepunch_proto_rawDescGZIP(), []int{0, 0} } // spec: https://github.com/libp2p/specs/blob/master/relay/DCUtR.md type HolePunch struct { - Type *HolePunch_Type `protobuf:"varint,1,req,name=type,enum=holepunch.pb.HolePunch_Type" json:"type,omitempty"` - ObsAddrs [][]byte `protobuf:"bytes,2,rep,name=ObsAddrs" json:"ObsAddrs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *HolePunch) Reset() { *m = HolePunch{} } -func (m *HolePunch) String() string { return proto.CompactTextString(m) } -func (*HolePunch) ProtoMessage() {} -func (*HolePunch) Descriptor() ([]byte, []int) { - return fileDescriptor_290ddea0f23ef64a, []int{0} -} -func (m *HolePunch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HolePunch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HolePunch.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HolePunch) XXX_Merge(src proto.Message) { - xxx_messageInfo_HolePunch.Merge(m, src) -} -func (m *HolePunch) XXX_Size() int { - return m.Size() -} -func (m *HolePunch) XXX_DiscardUnknown() { - xxx_messageInfo_HolePunch.DiscardUnknown(m) + Type *HolePunch_Type `protobuf:"varint,1,req,name=type,enum=holepunch.pb.HolePunch_Type" json:"type,omitempty"` + ObsAddrs [][]byte `protobuf:"bytes,2,rep,name=ObsAddrs" json:"ObsAddrs,omitempty"` } -var xxx_messageInfo_HolePunch proto.InternalMessageInfo - -func (m *HolePunch) GetType() HolePunch_Type { - if m != nil && m.Type != nil { - return *m.Type +func (x *HolePunch) Reset() { + *x = HolePunch{} + if 
protoimpl.UnsafeEnabled { + mi := &file_pb_holepunch_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return HolePunch_CONNECT } -func (m *HolePunch) GetObsAddrs() [][]byte { - if m != nil { - return m.ObsAddrs - } - return nil -} - -func init() { - proto.RegisterEnum("holepunch.pb.HolePunch_Type", HolePunch_Type_name, HolePunch_Type_value) - proto.RegisterType((*HolePunch)(nil), "holepunch.pb.HolePunch") +func (x *HolePunch) String() string { + return protoimpl.X.MessageStringOf(x) } -func init() { proto.RegisterFile("holepunch.proto", fileDescriptor_290ddea0f23ef64a) } - -var fileDescriptor_290ddea0f23ef64a = []byte{ - // 153 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcf, 0xc8, 0xcf, 0x49, - 0x2d, 0x28, 0xcd, 0x4b, 0xce, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x12, 0x48, - 0x52, 0xaa, 0xe4, 0xe2, 0xf4, 0xc8, 0xcf, 0x49, 0x0d, 0x00, 0xf1, 0x85, 0x0c, 0xb8, 0x58, 0x4a, - 0x2a, 0x0b, 0x52, 0x25, 0x18, 0x15, 0x98, 0x34, 0xf8, 0x8c, 0x64, 0xf4, 0x90, 0x55, 0xea, 0xc1, - 0x95, 0xe9, 0x85, 0x54, 0x16, 0xa4, 0x06, 0x81, 0x55, 0x0a, 0x49, 0x71, 0x71, 0xf8, 0x27, 0x15, - 0x3b, 0xa6, 0xa4, 0x14, 0x15, 0x4b, 0x30, 0x29, 0x30, 0x6b, 0xf0, 0x04, 0xc1, 0xf9, 0x4a, 0x72, - 0x5c, 0x2c, 0x20, 0x95, 0x42, 0xdc, 0x5c, 0xec, 0xce, 0xfe, 0x7e, 0x7e, 0xae, 0xce, 0x21, 0x02, - 0x29, 0x42, 0x9c, 0x5c, 0x2c, 0xc1, 0x91, 0x7e, 0xce, 0x02, 0x6b, 0x98, 0x9c, 0x78, 0x4e, 0x3c, - 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0x46, 0x40, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x34, 0x8d, 0x41, 0x7d, 0xa8, 0x00, 0x00, 0x00, -} +func (*HolePunch) ProtoMessage() {} -func (m *HolePunch) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *HolePunch) ProtoReflect() protoreflect.Message { + mi := 
&file_pb_holepunch_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *HolePunch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use HolePunch.ProtoReflect.Descriptor instead. +func (*HolePunch) Descriptor() ([]byte, []int) { + return file_pb_holepunch_proto_rawDescGZIP(), []int{0} } -func (m *HolePunch) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ObsAddrs) > 0 { - for iNdEx := len(m.ObsAddrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ObsAddrs[iNdEx]) - copy(dAtA[i:], m.ObsAddrs[iNdEx]) - i = encodeVarintHolepunch(dAtA, i, uint64(len(m.ObsAddrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } +func (x *HolePunch) GetType() HolePunch_Type { + if x != nil && x.Type != nil { + return *x.Type } - if m.Type == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } else { - i = encodeVarintHolepunch(dAtA, i, uint64(*m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return HolePunch_CONNECT } -func encodeVarintHolepunch(dAtA []byte, offset int, v uint64) int { - offset -= sovHolepunch(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *HolePunch) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != nil { - n += 1 + sovHolepunch(uint64(*m.Type)) +func (x *HolePunch) GetObsAddrs() [][]byte { + if x != nil { + return x.ObsAddrs } - if len(m.ObsAddrs) > 0 { - for _, b := range m.ObsAddrs { - l = len(b) - n += 1 + l + sovHolepunch(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - 
n += len(m.XXX_unrecognized) - } - return n + return nil } -func sovHolepunch(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozHolepunch(x uint64) (n int) { - return sovHolepunch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *HolePunch) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHolepunch - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HolePunch: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HolePunch: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var v HolePunch_Type - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHolepunch - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= HolePunch_Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Type = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObsAddrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHolepunch - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthHolepunch - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthHolepunch - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ObsAddrs = append(m.ObsAddrs, make([]byte, 
postIndex-iNdEx)) - copy(m.ObsAddrs[len(m.ObsAddrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHolepunch(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHolepunch - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("type") - } +var File_pb_holepunch_proto protoreflect.FileDescriptor - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +var file_pb_holepunch_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x62, 0x2f, 0x68, 0x6f, 0x6c, 0x65, 0x70, 0x75, 0x6e, 0x63, 0x68, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x6f, 0x6c, 0x65, 0x70, 0x75, 0x6e, 0x63, 0x68, 0x2e, + 0x70, 0x62, 0x22, 0x79, 0x0a, 0x09, 0x48, 0x6f, 0x6c, 0x65, 0x50, 0x75, 0x6e, 0x63, 0x68, 0x12, + 0x30, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x1c, 0x2e, + 0x68, 0x6f, 0x6c, 0x65, 0x70, 0x75, 0x6e, 0x63, 0x68, 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x6c, + 0x65, 0x50, 0x75, 0x6e, 0x63, 0x68, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x4f, 0x62, 0x73, 0x41, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x08, 0x4f, 0x62, 0x73, 0x41, 0x64, 0x64, 0x72, 0x73, 0x22, 0x1e, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, + 0x10, 0x64, 0x12, 0x09, 0x0a, 0x04, 0x53, 0x59, 0x4e, 0x43, 0x10, 0xac, 0x02, } -func skipHolepunch(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHolepunch - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHolepunch - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHolepunch - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthHolepunch - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupHolepunch + +var ( + file_pb_holepunch_proto_rawDescOnce sync.Once + file_pb_holepunch_proto_rawDescData = file_pb_holepunch_proto_rawDesc +) + +func file_pb_holepunch_proto_rawDescGZIP() []byte { + file_pb_holepunch_proto_rawDescOnce.Do(func() { + file_pb_holepunch_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_holepunch_proto_rawDescData) + }) + return file_pb_holepunch_proto_rawDescData +} + +var file_pb_holepunch_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_pb_holepunch_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pb_holepunch_proto_goTypes = []interface{}{ + (HolePunch_Type)(0), // 0: holepunch.pb.HolePunch.Type + (*HolePunch)(nil), // 1: holepunch.pb.HolePunch +} +var file_pb_holepunch_proto_depIdxs = []int32{ + 0, // 0: holepunch.pb.HolePunch.type:type_name -> holepunch.pb.HolePunch.Type + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pb_holepunch_proto_init() } +func 
file_pb_holepunch_proto_init() { + if File_pb_holepunch_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_holepunch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HolePunch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthHolepunch - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_holepunch_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_holepunch_proto_goTypes, + DependencyIndexes: file_pb_holepunch_proto_depIdxs, + EnumInfos: file_pb_holepunch_proto_enumTypes, + MessageInfos: file_pb_holepunch_proto_msgTypes, + }.Build() + File_pb_holepunch_proto = out.File + file_pb_holepunch_proto_rawDesc = nil + file_pb_holepunch_proto_goTypes = nil + file_pb_holepunch_proto_depIdxs = nil } - -var ( - ErrInvalidLengthHolepunch = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHolepunch = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupHolepunch = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go index 2f9049c8..5de7c7cf 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/svc.go @@ -7,16 +7,17 @@ import ( "sync" "time" + logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" 
"github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - pb "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb" + "github.com/libp2p/go-libp2p/p2p/host/eventbus" + "github.com/libp2p/go-libp2p/p2p/protocol/holepunch/pb" "github.com/libp2p/go-libp2p/p2p/protocol/identify" + "github.com/libp2p/go-msgio/pbio" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-msgio/protoio" ma "github.com/multiformats/go-multiaddr" ) @@ -123,7 +124,7 @@ func (s *Service) watchForPublicAddr() { } // Only start the holePuncher if we're behind a NAT / firewall. - sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}) + sub, err := s.host.EventBus().Subscribe(&event.EvtLocalReachabilityChanged{}, eventbus.Name("holepunch")) if err != nil { log.Debugf("failed to subscripe to Reachability event: %s", err) return @@ -185,8 +186,8 @@ func (s *Service) incomingHolePunch(str network.Stream) (rtt time.Duration, addr } defer str.Scope().ReleaseMemory(maxMsgSize) - wr := protoio.NewDelimitedWriter(str) - rd := protoio.NewDelimitedReader(str, maxMsgSize) + wr := pbio.NewDelimitedWriter(str) + rd := pbio.NewDelimitedReader(str, maxMsgSize) // Read Connect message msg := new(pb.HolePunch) diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go index 87aa6136..abf31829 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/holepunch/tracer.go @@ -227,7 +227,8 @@ func (t *tracer) gc() { for { select { - case now := <-timer.C: + case <-timer.C: + now := time.Now() t.mutex.Lock() for id, entry := range t.peers { if entry.last.Before(now.Add(-tracerCacheDuration)) { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go index 
b9d95240..54645e52 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id.go @@ -1,37 +1,46 @@ package identify import ( + "bytes" "context" "fmt" "io" + "sort" "sync" "time" + "golang.org/x/exp/slices" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" - "github.com/libp2p/go-libp2p/p2p/host/eventbus" - pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb" - - "github.com/libp2p/go-msgio/protoio" + "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb" - "github.com/gogo/protobuf/proto" logging "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-msgio/pbio" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" msmux "github.com/multiformats/go-multistream" + "google.golang.org/protobuf/proto" ) +//go:generate protoc --proto_path=$PWD:$PWD/../../.. --go_out=. --go_opt=Mpb/identify.proto=./pb pb/identify.proto + var log = logging.Logger("net/identify") -// ID is the protocol.ID of version 1.0.0 of the identify -// service. -const ID = "/ipfs/id/1.0.0" +const ( + // ID is the protocol.ID of version 1.0.0 of the identify service. + ID = "/ipfs/id/1.0.0" + // IDPush is the protocol.ID of the Identify push protocol. + // It sends full identify messages containing the current state of the peer. + IDPush = "/ipfs/id/push/1.0.0" +) const DefaultProtocolVersion = "ipfs/0.1.0" @@ -39,23 +48,46 @@ const ServiceName = "libp2p.identify" const maxPushConcurrency = 32 -// StreamReadTimeout is the read timeout on all incoming Identify family streams. 
-var StreamReadTimeout = 60 * time.Second +var Timeout = 60 * time.Second // timeout on all incoming Identify interactions -var ( - legacyIDSize = 2 * 1024 // 2k Bytes - signedIDSize = 8 * 1024 // 8K - maxMessages = 10 - defaultUserAgent = "github.com/libp2p/go-libp2p" +const ( + legacyIDSize = 2 * 1024 // 2k Bytes + signedIDSize = 8 * 1024 // 8K + maxMessages = 10 ) -type addPeerHandlerReq struct { - rp peer.ID - resp chan *peerHandler +var defaultUserAgent = "github.com/libp2p/go-libp2p" + +type identifySnapshot struct { + seq uint64 + protocols []protocol.ID + addrs []ma.Multiaddr + record *record.Envelope } -type rmPeerHandlerReq struct { - p peer.ID +// Equal says if two snapshots are identical. +// It does NOT compare the sequence number. +func (s identifySnapshot) Equal(other *identifySnapshot) bool { + hasRecord := s.record != nil + otherHasRecord := other.record != nil + if hasRecord != otherHasRecord { + return false + } + if hasRecord && !s.record.Equal(other.record) { + return false + } + if !slices.Equal(s.protocols, other.protocols) { + return false + } + if len(s.addrs) != len(other.addrs) { + return false + } + for i, a := range s.addrs { + if !a.Equal(other.addrs[i]) { + return false + } + } + return true } type IDService interface { @@ -73,32 +105,58 @@ type IDService interface { // ObservedAddrsFor returns the addresses peers have reported we've dialed from, // for a specific local address. ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr + Start() io.Closer } +type identifyPushSupport uint8 + +const ( + identifyPushSupportUnknown identifyPushSupport = iota + identifyPushSupported + identifyPushUnsupported +) + +type entry struct { + // The IdentifyWaitChan is created when IdentifyWait is called for the first time. + // IdentifyWait closes this channel when the Identify request completes, or when it fails. + IdentifyWaitChan chan struct{} + + // PushSupport saves our knowledge about the peer's support of the Identify Push protocol. 
+ // Before the identify request returns, we don't know yet if the peer supports Identify Push. + PushSupport identifyPushSupport + // Sequence is the sequence number of the last snapshot we sent to this peer. + Sequence uint64 +} + // idService is a structure that implements ProtocolIdentify. // It is a trivial service that gives the other peer some // useful information about the local peer. A sort of hello. // // The idService sends: -// - Our IPFS Protocol Version -// - Our IPFS Agent Version +// - Our libp2p Protocol Version +// - Our libp2p Agent Version // - Our public Listen Addresses type idService struct { Host host.Host UserAgent string ProtocolVersion string - ctx context.Context - ctxCancel context.CancelFunc + metricsTracer MetricsTracer + + setupCompleted chan struct{} // is closed when Start has finished setting up + ctx context.Context + ctxCancel context.CancelFunc // track resources that need to be shut down before we shut down refCount sync.WaitGroup disableSignedPeerRecord bool - // Identified connections (finished and in progress). connsMu sync.RWMutex - conns map[network.Conn]chan struct{} + // The conns map contains all connections we're currently handling. + // Connections are inserted as soon as they're available in the swarm + // Connections are removed from the map when the connection disconnects. + conns map[network.Conn]entry addrMu sync.Mutex @@ -111,12 +169,10 @@ type idService struct { evtPeerIdentificationFailed event.Emitter } - addPeerHandlerCh chan addPeerHandlerReq - rmPeerHandlerCh chan rmPeerHandlerReq - - // pushSemaphore limits the push/delta concurrency to avoid storms - // that clog the transient scope. 
- pushSemaphore chan struct{} + currentSnapshot struct { + sync.Mutex + snapshot identifySnapshot + } } // NewIDService constructs a new *idService and activates it by @@ -137,24 +193,18 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) { protocolVersion = cfg.protocolVersion } + ctx, cancel := context.WithCancel(context.Background()) s := &idService{ - Host: h, - UserAgent: userAgent, - ProtocolVersion: protocolVersion, - - conns: make(map[network.Conn]chan struct{}), - + Host: h, + UserAgent: userAgent, + ProtocolVersion: protocolVersion, + ctx: ctx, + ctxCancel: cancel, + conns: make(map[network.Conn]entry), disableSignedPeerRecord: cfg.disableSignedPeerRecord, - - addPeerHandlerCh: make(chan addPeerHandlerReq), - rmPeerHandlerCh: make(chan rmPeerHandlerReq), - - pushSemaphore: make(chan struct{}, maxPushConcurrency), + setupCompleted: make(chan struct{}), + metricsTracer: cfg.metricsTracer, } - s.ctx, s.ctxCancel = context.WithCancel(context.Background()) - - // handle local protocol handler updates, and push deltas to peers. - var err error observedAddrs, err := NewObservedAddrManager(h) if err != nil { @@ -162,9 +212,6 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) { } s.observedAddrs = observedAddrs - s.refCount.Add(1) - go s.loop() - s.emitters.evtPeerProtocolsUpdated, err = h.EventBus().Emitter(&event.EvtPeerProtocolsUpdated{}) if err != nil { log.Warnf("identify service not emitting peer protocol updates; err: %s", err) @@ -177,115 +224,125 @@ func NewIDService(h host.Host, opts ...Option) (*idService, error) { if err != nil { log.Warnf("identify service not emitting identification failed events; err: %s", err) } + return s, nil +} - // register protocols that do not depend on peer records. 
- h.SetStreamHandler(IDDelta, s.deltaHandler) - h.SetStreamHandler(ID, s.sendIdentifyResp) - h.SetStreamHandler(IDPush, s.pushHandler) +func (ids *idService) Start() { + ids.Host.Network().Notify((*netNotifiee)(ids)) + ids.Host.SetStreamHandler(ID, ids.handleIdentifyRequest) + ids.Host.SetStreamHandler(IDPush, ids.handlePush) + ids.updateSnapshot() + close(ids.setupCompleted) - h.Network().Notify((*netNotifiee)(s)) - return s, nil + ids.refCount.Add(1) + go ids.loop(ids.ctx) } -func (ids *idService) loop() { +func (ids *idService) loop(ctx context.Context) { defer ids.refCount.Done() - phs := make(map[peer.ID]*peerHandler) - sub, err := ids.Host.EventBus().Subscribe([]interface{}{ - &event.EvtLocalProtocolsUpdated{}, - &event.EvtLocalAddressesUpdated{}, - }, eventbus.BufSize(256)) + sub, err := ids.Host.EventBus().Subscribe( + []any{&event.EvtLocalProtocolsUpdated{}, &event.EvtLocalAddressesUpdated{}}, + eventbus.BufSize(256), + eventbus.Name("identify (loop)"), + ) if err != nil { log.Errorf("failed to subscribe to events on the bus, err=%s", err) return } + defer sub.Close() - phClosedCh := make(chan peer.ID) + // Send pushes from a separate Go routine. + // That way, we can end up with + // * this Go routine busy looping over all peers in sendPushes + // * another push being queued in the triggerPush channel + triggerPush := make(chan struct{}, 1) + ids.refCount.Add(1) + go func() { + defer ids.refCount.Done() - defer func() { - sub.Close() - // The context will cancel the workers. Now, wait for them to - // exit. - for range phs { - <-phClosedCh + for { + select { + case <-ctx.Done(): + return + case <-triggerPush: + ids.sendPushes(ctx) + } } }() - // Use a fresh context for the handlers. Otherwise, they'll get canceled - // before we're ready to shutdown and they'll have "stopped" without us - // _calling_ stop. 
- handlerCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - for { select { - case addReq := <-ids.addPeerHandlerCh: - rp := addReq.rp - ph, ok := phs[rp] - if !ok && ids.Host.Network().Connectedness(rp) == network.Connected { - ph = newPeerHandler(rp, ids) - ph.start(handlerCtx, func() { phClosedCh <- rp }) - phs[rp] = ph + case e, ok := <-sub.Out(): + if !ok { + return } - addReq.resp <- ph - case rmReq := <-ids.rmPeerHandlerCh: - rp := rmReq.p - if ids.Host.Network().Connectedness(rp) != network.Connected { - // before we remove the peerhandler, we should ensure that it will not send any - // more messages. Otherwise, we might create a new handler and the Identify response - // synchronized with the new handler might be overwritten by a message sent by this "old" handler. - ph, ok := phs[rp] - if !ok { - // move on, move on, there's nothing to see here. - continue - } - // This is idempotent if already stopped. - ph.stop() + if updated := ids.updateSnapshot(); !updated { + continue } - - case rp := <-phClosedCh: - ph := phs[rp] - - // If we are connected to the peer, it means that we got a connection from the peer - // before we could finish removing it's handler on the previous disconnection. - // If we delete the handler, we wont be able to push updates to it - // till we see a new connection. So, we should restart the handler. - // The fact that we got the handler on this channel means that it's context and handler - // have completed because we write the handler to this chanel only after it closed. 
- if ids.Host.Network().Connectedness(rp) == network.Connected { - ph.start(handlerCtx, func() { phClosedCh <- rp }) - } else { - delete(phs, rp) + if ids.metricsTracer != nil { + ids.metricsTracer.TriggeredPushes(e) } + select { + case triggerPush <- struct{}{}: + default: // we already have one more push queued, no need to queue another one + } + case <-ctx.Done(): + return + } + } +} + +func (ids *idService) sendPushes(ctx context.Context) { + ids.connsMu.RLock() + conns := make([]network.Conn, 0, len(ids.conns)) + for c, e := range ids.conns { + // Push even if we don't know if push is supported. + // This will be only the case while the IdentifyWaitChan call is in flight. + if e.PushSupport == identifyPushSupported || e.PushSupport == identifyPushSupportUnknown { + conns = append(conns, c) + } + } + ids.connsMu.RUnlock() - case e, more := <-sub.Out(): - if !more { + sem := make(chan struct{}, maxPushConcurrency) + var wg sync.WaitGroup + for _, c := range conns { + // check if the connection is still alive + ids.connsMu.RLock() + e, ok := ids.conns[c] + ids.connsMu.RUnlock() + if !ok { + continue + } + // check if we already sent the current snapshot to this peer + ids.currentSnapshot.Lock() + snapshot := ids.currentSnapshot.snapshot + ids.currentSnapshot.Unlock() + if e.Sequence >= snapshot.seq { + log.Debugw("already sent this snapshot to peer", "peer", c.RemotePeer(), "seq", snapshot.seq) + continue + } + // we haven't, send it now + sem <- struct{}{} + wg.Add(1) + go func(c network.Conn) { + defer wg.Done() + defer func() { <-sem }() + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + str, err := ids.Host.NewStream(ctx, c.RemotePeer(), IDPush) + if err != nil { // connection might have been closed recently return } - switch e.(type) { - case event.EvtLocalAddressesUpdated: - for pid := range phs { - select { - case phs[pid].pushCh <- struct{}{}: - default: - log.Debugf("dropping addr updated message for %s as buffer full", 
pid.Pretty()) - } - } - - case event.EvtLocalProtocolsUpdated: - for pid := range phs { - select { - case phs[pid].deltaCh <- struct{}{}: - default: - log.Debugf("dropping protocol updated message for %s as buffer full", pid.Pretty()) - } - } + // TODO: find out if the peer supports push if we didn't have any information about push support + if err := ids.sendIdentifyResp(str, true); err != nil { + log.Debugw("failed to send identify push", "peer", c.RemotePeer(), "error", err) + return } - - case <-ids.ctx.Done(): - return - } + }(c) } + wg.Wait() } // Close shuts down the idService @@ -304,60 +361,68 @@ func (ids *idService) ObservedAddrsFor(local ma.Multiaddr) []ma.Multiaddr { return ids.observedAddrs.AddrsFor(local) } +// IdentifyConn runs the Identify protocol on a connection. +// It returns when we've received the peer's Identify message (or the request fails). +// If successful, the peer store will contain the peer's addresses and supported protocols. func (ids *idService) IdentifyConn(c network.Conn) { <-ids.IdentifyWait(c) } +// IdentifyWait runs the Identify protocol on a connection. +// It doesn't block and returns a channel that is closed when we receive +// the peer's Identify message (or the request fails). +// If successful, the peer store will contain the peer's addresses and supported protocols. func (ids *idService) IdentifyWait(c network.Conn) <-chan struct{} { - ids.connsMu.RLock() - wait, found := ids.conns[c] - ids.connsMu.RUnlock() - - if found { - return wait - } - ids.connsMu.Lock() defer ids.connsMu.Unlock() - wait, found = ids.conns[c] + e, found := ids.conns[c] if !found { - wait = make(chan struct{}) - ids.conns[c] = wait - - // Spawn an identify. The connection may actually be closed - // already, but that doesn't really matter. We'll fail to open a - // stream then forget the connection. 
- go func() { - defer close(wait) - if err := ids.identifyConn(c); err != nil { - log.Warnf("failed to identify %s: %s", c.RemotePeer(), err) - ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err}) - return - } - ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()}) - }() + // No entry found. We may have gotten an out of order notification. Check it we should have this conn (because we're still connected) + // We hold the ids.connsMu lock so this is safe since a disconnect event will be processed later if we are connected. + if c.IsClosed() { + log.Debugw("connection not found in identify service", "peer", c.RemotePeer()) + ch := make(chan struct{}) + close(ch) + return ch + } else { + ids.addConnWithLock(c) + } } - return wait -} + if e.IdentifyWaitChan != nil { + return e.IdentifyWaitChan + } + // First call to IdentifyWait for this connection. Create the channel. + e.IdentifyWaitChan = make(chan struct{}) + ids.conns[c] = e -func (ids *idService) removeConn(c network.Conn) { - ids.connsMu.Lock() - delete(ids.conns, c) - ids.connsMu.Unlock() + // Spawn an identify. The connection may actually be closed + // already, but that doesn't really matter. We'll fail to open a + // stream then forget the connection. 
+ go func() { + defer close(e.IdentifyWaitChan) + if err := ids.identifyConn(c); err != nil { + log.Warnf("failed to identify %s: %s", c.RemotePeer(), err) + ids.emitters.evtPeerIdentificationFailed.Emit(event.EvtPeerIdentificationFailed{Peer: c.RemotePeer(), Reason: err}) + return + } + + ids.emitters.evtPeerIdentificationCompleted.Emit(event.EvtPeerIdentificationCompleted{Peer: c.RemotePeer()}) + }() + + return e.IdentifyWaitChan } func (ids *idService) identifyConn(c network.Conn) error { - s, err := c.NewStream(network.WithUseTransient(context.TODO(), "identify")) + ctx, cancel := context.WithTimeout(context.Background(), Timeout) + defer cancel() + s, err := c.NewStream(network.WithUseTransient(ctx, "identify")) if err != nil { - log.Debugw("error opening identify stream", "error", err) - - // We usually do this on disconnect, but we may have already - // processed the disconnect event. - ids.removeConn(c) + log.Debugw("error opening identify stream", "peer", c.RemotePeer(), "error", err) return err } + s.SetDeadline(time.Now().Add(Timeout)) if err := s.SetProtocol(ID); err != nil { log.Warnf("error setting identify protocol for stream: %s", err) @@ -371,48 +436,60 @@ func (ids *idService) identifyConn(c network.Conn) error { return err } - return ids.handleIdentifyResponse(s) + return ids.handleIdentifyResponse(s, false) +} + +// handlePush handles incoming identify push streams +func (ids *idService) handlePush(s network.Stream) { + s.SetDeadline(time.Now().Add(Timeout)) + ids.handleIdentifyResponse(s, true) +} + +func (ids *idService) handleIdentifyRequest(s network.Stream) { + _ = ids.sendIdentifyResp(s, false) } -func (ids *idService) sendIdentifyResp(s network.Stream) { +func (ids *idService) sendIdentifyResp(s network.Stream, isPush bool) error { if err := s.Scope().SetService(ServiceName); err != nil { - log.Warnf("error attaching stream to identify service: %s", err) s.Reset() - return + return fmt.Errorf("failed to attaching stream to identify 
service: %w", err) } - defer s.Close() - c := s.Conn() + ids.currentSnapshot.Lock() + snapshot := ids.currentSnapshot.snapshot + ids.currentSnapshot.Unlock() - phCh := make(chan *peerHandler, 1) - select { - case ids.addPeerHandlerCh <- addPeerHandlerReq{c.RemotePeer(), phCh}: - case <-ids.ctx.Done(): - return - } + log.Debugw("sending snapshot", "seq", snapshot.seq, "protocols", snapshot.protocols, "addrs", snapshot.addrs) - var ph *peerHandler - select { - case ph = <-phCh: - case <-ids.ctx.Done(): - return + mes := ids.createBaseIdentifyResponse(s.Conn(), &snapshot) + mes.SignedPeerRecord = ids.getSignedRecord(&snapshot) + + log.Debugf("%s sending message to %s %s", ID, s.Conn().RemotePeer(), s.Conn().RemoteMultiaddr()) + if err := ids.writeChunkedIdentifyMsg(s, mes); err != nil { + return err } - if ph == nil { - // Peer disconnected, abort. - s.Reset() - return + if ids.metricsTracer != nil { + ids.metricsTracer.IdentifySent(isPush, len(mes.Protocols), len(mes.ListenAddrs)) } - ph.snapshotMu.RLock() - snapshot := ph.snapshot - ph.snapshotMu.RUnlock() - ids.writeChunkedIdentifyMsg(c, snapshot, s) - log.Debugf("%s sent message to %s %s", ID, c.RemotePeer(), c.RemoteMultiaddr()) + ids.connsMu.Lock() + defer ids.connsMu.Unlock() + e, ok := ids.conns[s.Conn()] + // The connection might already have been closed. + // We *should* receive the Connected notification from the swarm before we're able to accept the peer's + // Identify stream, but if that for some reason doesn't work, we also wouldn't have a map entry here. + // The only consequence would be that we send a spurious Push to that peer later. 
+ if !ok { + return nil + } + e.Sequence = snapshot.seq + ids.conns[s.Conn()] = e + return nil } -func (ids *idService) handleIdentifyResponse(s network.Stream) error { +func (ids *idService) handleIdentifyResponse(s network.Stream, isPush bool) error { if err := s.Scope().SetService(ServiceName); err != nil { log.Warnf("error attaching stream to identify service: %s", err) s.Reset() @@ -426,11 +503,9 @@ func (ids *idService) handleIdentifyResponse(s network.Stream) error { } defer s.Scope().ReleaseMemory(signedIDSize) - _ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout)) - c := s.Conn() - r := protoio.NewDelimitedReader(s, signedIDSize) + r := pbio.NewDelimitedReader(s, signedIDSize) mes := &pb.Identify{} if err := readAllIDMessages(r, mes); err != nil { @@ -443,12 +518,34 @@ func (ids *idService) handleIdentifyResponse(s network.Stream) error { log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr()) - ids.consumeMessage(mes, c) + ids.consumeMessage(mes, c, isPush) + + if ids.metricsTracer != nil { + ids.metricsTracer.IdentifyReceived(isPush, len(mes.Protocols), len(mes.ListenAddrs)) + } + + ids.connsMu.Lock() + defer ids.connsMu.Unlock() + e, ok := ids.conns[c] + if !ok { // might already have disconnected + return nil + } + sup, err := ids.Host.Peerstore().SupportsProtocols(c.RemotePeer(), IDPush) + if supportsIdentifyPush := err == nil && len(sup) > 0; supportsIdentifyPush { + e.PushSupport = identifyPushSupported + } else { + e.PushSupport = identifyPushUnsupported + } + + if ids.metricsTracer != nil { + ids.metricsTracer.ConnPushSupport(e.PushSupport) + } + ids.conns[c] = e return nil } -func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error { +func readAllIDMessages(r pbio.Reader, finalMsg proto.Message) error { mes := &pb.Identify{} for i := 0; i < maxMessages; i++ { switch err := r.ReadMsg(mes); err { @@ -464,49 +561,60 @@ func readAllIDMessages(r protoio.Reader, finalMsg proto.Message) error { 
return fmt.Errorf("too many parts") } -func (ids *idService) getSnapshot() *identifySnapshot { - snapshot := new(identifySnapshot) +func (ids *idService) updateSnapshot() (updated bool) { + addrs := ids.Host.Addrs() + sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i].Bytes(), addrs[j].Bytes()) == -1 }) + protos := ids.Host.Mux().Protocols() + sort.Slice(protos, func(i, j int) bool { return protos[i] < protos[j] }) + snapshot := identifySnapshot{ + addrs: addrs, + protocols: protos, + } + if !ids.disableSignedPeerRecord { if cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()); ok { snapshot.record = cab.GetPeerRecord(ids.Host.ID()) } } - snapshot.addrs = ids.Host.Addrs() - snapshot.protocols = ids.Host.Mux().Protocols() - return snapshot + + ids.currentSnapshot.Lock() + defer ids.currentSnapshot.Unlock() + + if ids.currentSnapshot.snapshot.Equal(&snapshot) { + return false + } + + snapshot.seq = ids.currentSnapshot.snapshot.seq + 1 + ids.currentSnapshot.snapshot = snapshot + + log.Debugw("updating snapshot", "seq", snapshot.seq, "addrs", snapshot.addrs) + return true } -func (ids *idService) writeChunkedIdentifyMsg(c network.Conn, snapshot *identifySnapshot, s network.Stream) error { - mes := ids.createBaseIdentifyResponse(c, snapshot) - sr := ids.getSignedRecord(snapshot) - mes.SignedPeerRecord = sr - writer := protoio.NewDelimitedWriter(s) +func (ids *idService) writeChunkedIdentifyMsg(s network.Stream, mes *pb.Identify) error { + writer := pbio.NewDelimitedWriter(s) - if sr == nil || proto.Size(mes) <= legacyIDSize { + if mes.SignedPeerRecord == nil || proto.Size(mes) <= legacyIDSize { return writer.WriteMsg(mes) } + + sr := mes.SignedPeerRecord mes.SignedPeerRecord = nil if err := writer.WriteMsg(mes); err != nil { return err } - // then write just the signed record - m := &pb.Identify{SignedPeerRecord: sr} - err := writer.WriteMsg(m) - return err + return writer.WriteMsg(&pb.Identify{SignedPeerRecord: sr}) } -func (ids 
*idService) createBaseIdentifyResponse( - conn network.Conn, - snapshot *identifySnapshot, -) *pb.Identify { +func (ids *idService) createBaseIdentifyResponse(conn network.Conn, snapshot *identifySnapshot) *pb.Identify { mes := &pb.Identify{} remoteAddr := conn.RemoteMultiaddr() localAddr := conn.LocalMultiaddr() // set protocols this node is currently handling - mes.Protocols = snapshot.protocols + mes.Protocols = protocol.ConvertToStrings(snapshot.protocols) // observed address so other side is informed of their // "public" address, at least in relation to us. @@ -565,11 +673,50 @@ func (ids *idService) getSignedRecord(snapshot *identifySnapshot) []byte { return recBytes } -func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) { +// diff takes two slices of strings (a and b) and computes which elements were added and removed in b +func diff(a, b []protocol.ID) (added, removed []protocol.ID) { + // This is O(n^2), but it's fine because the slices are small. + for _, x := range b { + var found bool + for _, y := range a { + if x == y { + found = true + break + } + } + if !found { + added = append(added, x) + } + } + for _, x := range a { + var found bool + for _, y := range b { + if x == y { + found = true + break + } + } + if !found { + removed = append(removed, x) + } + } + return +} + +func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn, isPush bool) { p := c.RemotePeer() - // mes.Protocols - ids.Host.Peerstore().SetProtocols(p, mes.Protocols...) + supported, _ := ids.Host.Peerstore().GetProtocols(p) + mesProtocols := protocol.ConvertFromStrings(mes.Protocols) + added, removed := diff(supported, mesProtocols) + ids.Host.Peerstore().SetProtocols(p, mesProtocols...) 
+ if isPush { + ids.emitters.evtPeerProtocolsUpdated.Emit(event.EvtPeerProtocolsUpdated{ + Peer: p, + Added: added, + Removed: removed, + }) + } // mes.ObservedAddr ids.consumeObservedAddress(mes.GetObservedAddr(), c) @@ -597,7 +744,6 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) { // add certified addresses for the peer, if they sent us a signed peer record // otherwise use the unsigned addresses. - var signedPeerRecord *record.Envelope signedPeerRecord, err := signedPeerRecordFromMessage(mes) if err != nil { log.Errorf("error getting peer record from Identify message: %v", err) @@ -621,10 +767,14 @@ func (ids *idService) consumeMessage(mes *pb.Identify, c network.Conn) { // add signed addrs if we have them and the peerstore supports them cab, ok := peerstore.GetCertifiedAddrBook(ids.Host.Peerstore()) - if ok && signedPeerRecord != nil { - _, addErr := cab.ConsumePeerRecord(signedPeerRecord, ttl) - if addErr != nil { - log.Debugf("error adding signed addrs to peerstore: %v", addErr) + if ok && signedPeerRecord != nil && signedPeerRecord.PublicKey != nil { + id, err := peer.IDFromPublicKey(signedPeerRecord.PublicKey) + if err != nil { + log.Debugf("failed to derive peer ID from peer record: %s", err) + } else if id != c.RemotePeer() { + log.Debugf("received signed peer record for unexpected peer ID. 
expected %s, got %s", c.RemotePeer(), id) + } else if _, err := cab.ConsumePeerRecord(signedPeerRecord, ttl); err != nil { + log.Debugf("error adding signed addrs to peerstore: %v", err) } } else { ids.Host.Peerstore().AddAddrs(p, lmaddrs, ttl) @@ -764,6 +914,15 @@ func (ids *idService) consumeObservedAddress(observed []byte, c network.Conn) { ids.observedAddrs.Record(c, maddr) } +// addConnWithLock assuems caller holds the connsMu lock +func (ids *idService) addConnWithLock(c network.Conn) { + _, found := ids.conns[c] + if !found { + <-ids.setupCompleted + ids.conns[c] = entry{} + } +} + func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) { if msg.SignedPeerRecord == nil || len(msg.SignedPeerRecord) == 0 { return nil, nil @@ -772,38 +931,37 @@ func signedPeerRecordFromMessage(msg *pb.Identify) (*record.Envelope, error) { return env, err } -// netNotifiee defines methods to be used with the IpfsDHT +// netNotifiee defines methods to be used with the swarm type netNotifiee idService func (nn *netNotifiee) IDService() *idService { return (*idService)(nn) } -func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { - nn.IDService().IdentifyWait(v) +func (nn *netNotifiee) Connected(_ network.Network, c network.Conn) { + ids := nn.IDService() + + ids.connsMu.Lock() + ids.addConnWithLock(c) + ids.connsMu.Unlock() + + nn.IDService().IdentifyWait(c) } -func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { +func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) { ids := nn.IDService() // Stop tracking the connection. 
- ids.removeConn(v) - - // undo the setting of addresses to peer.ConnectedAddrTTL we did - ids.addrMu.Lock() - defer ids.addrMu.Unlock() - - if ids.Host.Network().Connectedness(v.RemotePeer()) != network.Connected { - // consider removing the peer handler for this - select { - case ids.rmPeerHandlerCh <- rmPeerHandlerReq{v.RemotePeer()}: - case <-ids.ctx.Done(): - return - } + ids.connsMu.Lock() + delete(ids.conns, c) + ids.connsMu.Unlock() + if ids.Host.Network().Connectedness(c.RemotePeer()) != network.Connected { // Last disconnect. - ps := ids.Host.Peerstore() - ps.UpdateAddrs(v.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL) + // Undo the setting of addresses to peer.ConnectedAddrTTL we did + ids.addrMu.Lock() + defer ids.addrMu.Unlock() + ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.ConnectedAddrTTL, peerstore.RecentlyConnectedAddrTTL) } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go deleted file mode 100644 index 7f7c75f1..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_delta.go +++ /dev/null @@ -1,82 +0,0 @@ -package identify - -import ( - "time" - - "github.com/libp2p/go-libp2p/core/event" - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb" - - "github.com/libp2p/go-msgio/protoio" -) - -const IDDelta = "/p2p/id/delta/1.0.0" - -const deltaMsgSize = 2048 - -// deltaHandler handles incoming delta updates from peers. 
-func (ids *idService) deltaHandler(s network.Stream) { - if err := s.Scope().SetService(ServiceName); err != nil { - log.Warnf("error attaching stream to identify service: %s", err) - s.Reset() - return - } - - if err := s.Scope().ReserveMemory(deltaMsgSize, network.ReservationPriorityAlways); err != nil { - log.Warnf("error reserving memory for identify stream: %s", err) - s.Reset() - return - } - defer s.Scope().ReleaseMemory(deltaMsgSize) - - _ = s.SetReadDeadline(time.Now().Add(StreamReadTimeout)) - - c := s.Conn() - - r := protoio.NewDelimitedReader(s, deltaMsgSize) - mes := pb.Identify{} - if err := r.ReadMsg(&mes); err != nil { - log.Warn("error reading identify message: ", err) - _ = s.Reset() - return - } - - defer s.Close() - - log.Debugf("%s received message from %s %s", s.Protocol(), c.RemotePeer(), c.RemoteMultiaddr()) - - delta := mes.GetDelta() - if delta == nil { - return - } - - p := s.Conn().RemotePeer() - if err := ids.consumeDelta(p, delta); err != nil { - _ = s.Reset() - log.Warnf("delta update from peer %s failed: %s", p, err) - } -} - -// consumeDelta processes an incoming delta from a peer, updating the peerstore -// and emitting the appropriate events. -func (ids *idService) consumeDelta(id peer.ID, delta *pb.Delta) error { - err := ids.Host.Peerstore().AddProtocols(id, delta.GetAddedProtocols()...) - if err != nil { - return err - } - - err = ids.Host.Peerstore().RemoveProtocols(id, delta.GetRmProtocols()...) 
- if err != nil { - return err - } - - evt := event.EvtPeerProtocolsUpdated{ - Peer: id, - Added: protocol.ConvertFromStrings(delta.GetAddedProtocols()), - Removed: protocol.ConvertFromStrings(delta.GetRmProtocols()), - } - ids.emitters.evtPeerProtocolsUpdated.Emit(evt) - return nil -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go deleted file mode 100644 index 22be28a5..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go117.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !go1.18 - -package identify - -import ( - "fmt" - "runtime/debug" -) - -func init() { - bi, ok := debug.ReadBuildInfo() - // ok will only be true if this is built as a dependency of another module - if !ok { - return - } - version := bi.Main.Version - if version == "(devel)" { - defaultUserAgent = bi.Main.Path - } else { - defaultUserAgent = fmt.Sprintf("%s@%s", bi.Main.Path, bi.Main.Version) - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go deleted file mode 100644 index cbb47a9f..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_push.go +++ /dev/null @@ -1,17 +0,0 @@ -package identify - -import ( - "github.com/libp2p/go-libp2p/core/network" -) - -// IDPush is the protocol.ID of the Identify push protocol. It sends full identify messages containing -// the current state of the peer. -// -// It is in the process of being replaced by identify delta, which sends only diffs for better -// resource utilisation. -const IDPush = "/ipfs/id/push/1.0.0" - -// pushHandler handles incoming identify push streams. The behaviour is identical to the ordinary identify protocol. 
-func (ids *idService) pushHandler(s network.Stream) { - ids.handleIdentifyResponse(s) -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go new file mode 100644 index 00000000..28598fa3 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/metrics.go @@ -0,0 +1,206 @@ +package identify + +import ( + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/p2p/metricshelper" + + "github.com/prometheus/client_golang/prometheus" +) + +const metricNamespace = "libp2p_identify" + +var ( + pushesTriggered = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "identify_pushes_triggered_total", + Help: "Pushes Triggered", + }, + []string{"trigger"}, + ) + identify = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "identify_total", + Help: "Identify", + }, + []string{"dir"}, + ) + identifyPush = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "identify_push_total", + Help: "Identify Push", + }, + []string{"dir"}, + ) + connPushSupportTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: metricNamespace, + Name: "conn_push_support_total", + Help: "Identify Connection Push Support", + }, + []string{"support"}, + ) + protocolsCount = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "protocols_count", + Help: "Protocols Count", + }, + ) + addrsCount = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: metricNamespace, + Name: "addrs_count", + Help: "Address Count", + }, + ) + numProtocolsReceived = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "protocols_received", + Help: "Number of Protocols received", + Buckets: buckets, + }, + ) + numAddrsReceived = 
prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: metricNamespace, + Name: "addrs_received", + Help: "Number of addrs received", + Buckets: buckets, + }, + ) + collectors = []prometheus.Collector{ + pushesTriggered, + identify, + identifyPush, + connPushSupportTotal, + protocolsCount, + addrsCount, + numProtocolsReceived, + numAddrsReceived, + } + // 1 to 20 and then up to 100 in steps of 5 + buckets = append( + prometheus.LinearBuckets(1, 1, 20), + prometheus.LinearBuckets(25, 5, 16)..., + ) +) + +type MetricsTracer interface { + // TriggeredPushes counts IdentifyPushes triggered by event + TriggeredPushes(event any) + + // ConnPushSupport counts peers by Push Support + ConnPushSupport(identifyPushSupport) + + // IdentifyReceived tracks metrics on receiving an identify response + IdentifyReceived(isPush bool, numProtocols int, numAddrs int) + + // IdentifySent tracks metrics on sending an identify response + IdentifySent(isPush bool, numProtocols int, numAddrs int) +} + +type metricsTracer struct{} + +var _ MetricsTracer = &metricsTracer{} + +type metricsTracerSetting struct { + reg prometheus.Registerer +} + +type MetricsTracerOption func(*metricsTracerSetting) + +func WithRegisterer(reg prometheus.Registerer) MetricsTracerOption { + return func(s *metricsTracerSetting) { + if reg != nil { + s.reg = reg + } + } +} + +func NewMetricsTracer(opts ...MetricsTracerOption) MetricsTracer { + setting := &metricsTracerSetting{reg: prometheus.DefaultRegisterer} + for _, opt := range opts { + opt(setting) + } + metricshelper.RegisterCollectors(setting.reg, collectors...) 
+ return &metricsTracer{} +} + +func (t *metricsTracer) TriggeredPushes(ev any) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + typ := "unknown" + switch ev.(type) { + case event.EvtLocalProtocolsUpdated: + typ = "protocols_updated" + case event.EvtLocalAddressesUpdated: + typ = "addresses_updated" + } + *tags = append(*tags, typ) + pushesTriggered.WithLabelValues(*tags...).Inc() +} + +func (t *metricsTracer) IncrementPushSupport(s identifyPushSupport) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, getPushSupport(s)) + connPushSupportTotal.WithLabelValues(*tags...).Inc() +} + +func (t *metricsTracer) IdentifySent(isPush bool, numProtocols int, numAddrs int) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + if isPush { + *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound)) + identifyPush.WithLabelValues(*tags...).Inc() + } else { + *tags = append(*tags, metricshelper.GetDirection(network.DirInbound)) + identify.WithLabelValues(*tags...).Inc() + } + + protocolsCount.Set(float64(numProtocols)) + addrsCount.Set(float64(numAddrs)) +} + +func (t *metricsTracer) IdentifyReceived(isPush bool, numProtocols int, numAddrs int) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + if isPush { + *tags = append(*tags, metricshelper.GetDirection(network.DirInbound)) + identifyPush.WithLabelValues(*tags...).Inc() + } else { + *tags = append(*tags, metricshelper.GetDirection(network.DirOutbound)) + identify.WithLabelValues(*tags...).Inc() + } + + numProtocolsReceived.Observe(float64(numProtocols)) + numAddrsReceived.Observe(float64(numAddrs)) +} + +func (t *metricsTracer) ConnPushSupport(support identifyPushSupport) { + tags := metricshelper.GetStringSlice() + defer metricshelper.PutStringSlice(tags) + + *tags = append(*tags, getPushSupport(support)) + 
connPushSupportTotal.WithLabelValues(*tags...).Inc() +} + +func getPushSupport(s identifyPushSupport) string { + switch s { + case identifyPushSupported: + return "supported" + case identifyPushUnsupported: + return "not supported" + default: + return "unknown" + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go index bd72175d..451af096 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/obsaddr.go @@ -141,7 +141,7 @@ func NewObservedAddrManager(host host.Host) (*ObservedAddrManager, error) { } oas.ctx, oas.ctxCancel = context.WithCancel(context.Background()) - reachabilitySub, err := host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged)) + reachabilitySub, err := host.EventBus().Subscribe(new(event.EvtLocalReachabilityChanged), eventbus.Name("identify (obsaddr)")) if err != nil { return nil, fmt.Errorf("failed to subscribe to reachability event: %s", err) } @@ -356,54 +356,99 @@ func (oas *ObservedAddrManager) removeConn(conn network.Conn) { oas.activeConnsMu.Unlock() } -func (oas *ObservedAddrManager) maybeRecordObservation(conn network.Conn, observed ma.Multiaddr) { +type normalizeMultiaddrer interface { + NormalizeMultiaddr(addr ma.Multiaddr) ma.Multiaddr +} + +type addrsProvider interface { + Addrs() []ma.Multiaddr +} + +type listenAddrsProvider interface { + ListenAddresses() []ma.Multiaddr + InterfaceListenAddresses() ([]ma.Multiaddr, error) +} + +func shouldRecordObservation(host addrsProvider, network listenAddrsProvider, conn network.ConnMultiaddrs, observed ma.Multiaddr) bool { // First, determine if this observation is even worth keeping... // Ignore observations from loopback nodes. We already know our loopback // addresses. 
if manet.IsIPLoopback(observed) { - return + return false } // we should only use ObservedAddr when our connection's LocalAddr is one // of our ListenAddrs. If we Dial out using an ephemeral addr, knowing that // address's external mapping is not very useful because the port will not be // the same as the listen addr. - ifaceaddrs, err := oas.host.Network().InterfaceListenAddresses() + ifaceaddrs, err := network.InterfaceListenAddresses() if err != nil { log.Infof("failed to get interface listen addrs", err) - return + return false + } + + normalizer, canNormalize := host.(normalizeMultiaddrer) + + if canNormalize { + for i, a := range ifaceaddrs { + ifaceaddrs[i] = normalizer.NormalizeMultiaddr(a) + } } local := conn.LocalMultiaddr() - if !ma.Contains(ifaceaddrs, local) && !ma.Contains(oas.host.Network().ListenAddresses(), local) { + if canNormalize { + local = normalizer.NormalizeMultiaddr(local) + } + + listenAddrs := network.ListenAddresses() + if canNormalize { + for i, a := range listenAddrs { + listenAddrs[i] = normalizer.NormalizeMultiaddr(a) + } + } + + if !ma.Contains(ifaceaddrs, local) && !ma.Contains(listenAddrs, local) { // not in our list - return + return false + } + + hostAddrs := host.Addrs() + if canNormalize { + for i, a := range hostAddrs { + hostAddrs[i] = normalizer.NormalizeMultiaddr(a) + } } // We should reject the connection if the observation doesn't match the // transports of one of our advertised addresses. - if !HasConsistentTransport(observed, oas.host.Addrs()) && - !HasConsistentTransport(observed, oas.host.Network().ListenAddresses()) { + if !HasConsistentTransport(observed, hostAddrs) && + !HasConsistentTransport(observed, listenAddrs) { log.Debugw( "observed multiaddr doesn't match the transports of any announced addresses", "from", conn.RemoteMultiaddr(), "observed", observed, ) - return + return false } - // Ok, the observation is good, record it. 
- log.Debugw("added own observed listen addr", "observed", observed) + return true +} - defer oas.addConn(conn, observed) +func (oas *ObservedAddrManager) maybeRecordObservation(conn network.Conn, observed ma.Multiaddr) { + shouldRecord := shouldRecordObservation(oas.host, oas.host.Network(), conn, observed) + if shouldRecord { + // Ok, the observation is good, record it. + log.Debugw("added own observed listen addr", "observed", observed) + defer oas.addConn(conn, observed) - oas.mu.Lock() - defer oas.mu.Unlock() - oas.recordObservationUnlocked(conn, observed) + oas.mu.Lock() + defer oas.mu.Unlock() + oas.recordObservationUnlocked(conn, observed) - if oas.reachability == network.ReachabilityPrivate { - oas.emitAllNATTypes() + if oas.reachability == network.ReachabilityPrivate { + oas.emitAllNATTypes() + } } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go index 38f505b6..f1886656 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/opts.go @@ -4,6 +4,7 @@ type config struct { protocolVersion string userAgent string disableSignedPeerRecord bool + metricsTracer MetricsTracer } // Option is an option function for identify. @@ -31,3 +32,9 @@ func DisableSignedPeerRecord() Option { cfg.disableSignedPeerRecord = true } } + +func WithMetricsTracer(tr MetricsTracer) Option { + return func(cfg *config) { + cfg.metricsTracer = tr + } +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile deleted file mode 100644 index eb14b576..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(GOPATH)/src:. --gogofast_out=. 
$< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go index 3cfed827..1c93815d 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.pb.go @@ -1,91 +1,35 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: identify.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/identify.proto -package identify_pb +package pb import ( - fmt "fmt" - io "io" - math "math" - math_bits "math/bits" - - proto "github.com/gogo/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Delta struct { - // new protocols now serviced by the peer. - AddedProtocols []string `protobuf:"bytes,1,rep,name=added_protocols,json=addedProtocols" json:"added_protocols,omitempty"` - // protocols dropped by the peer. 
- RmProtocols []string `protobuf:"bytes,2,rep,name=rm_protocols,json=rmProtocols" json:"rm_protocols,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Delta) Reset() { *m = Delta{} } -func (m *Delta) String() string { return proto.CompactTextString(m) } -func (*Delta) ProtoMessage() {} -func (*Delta) Descriptor() ([]byte, []int) { - return fileDescriptor_83f1e7e6b485409f, []int{0} -} -func (m *Delta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Delta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Delta.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Delta) XXX_Merge(src proto.Message) { - xxx_messageInfo_Delta.Merge(m, src) -} -func (m *Delta) XXX_Size() int { - return m.Size() -} -func (m *Delta) XXX_DiscardUnknown() { - xxx_messageInfo_Delta.DiscardUnknown(m) -} - -var xxx_messageInfo_Delta proto.InternalMessageInfo - -func (m *Delta) GetAddedProtocols() []string { - if m != nil { - return m.AddedProtocols - } - return nil -} - -func (m *Delta) GetRmProtocols() []string { - if m != nil { - return m.RmProtocols - } - return nil -} +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Identify struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // protocolVersion determines compatibility between peers - ProtocolVersion *string `protobuf:"bytes,5,opt,name=protocolVersion" json:"protocolVersion,omitempty"` + ProtocolVersion *string `protobuf:"bytes,5,opt,name=protocolVersion" json:"protocolVersion,omitempty"` // e.g. ipfs/1.0.0 // agentVersion is like a UserAgent string in browsers, or client version in bittorrent // includes the client name and client. - AgentVersion *string `protobuf:"bytes,6,opt,name=agentVersion" json:"agentVersion,omitempty"` + AgentVersion *string `protobuf:"bytes,6,opt,name=agentVersion" json:"agentVersion,omitempty"` // e.g. go-ipfs/0.1.0 // publicKey is this node's public key (which also gives its node.ID) // - may not need to be sent, as secure channel implies it has been sent. // - then again, if we change / disable secure channel, may still want it. @@ -98,877 +42,178 @@ type Identify struct { ObservedAddr []byte `protobuf:"bytes,4,opt,name=observedAddr" json:"observedAddr,omitempty"` // protocols are the services this node is running Protocols []string `protobuf:"bytes,3,rep,name=protocols" json:"protocols,omitempty"` - // a delta update is incompatible with everything else. If this field is included, none of the others can appear. - Delta *Delta `protobuf:"bytes,7,opt,name=delta" json:"delta,omitempty"` // signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord, // signed by the sending node. It contains the same addresses as the listenAddrs field, but // in a form that lets us share authenticated addrs with other peers. // see github.com/libp2p/go-libp2p/core/record/pb/envelope.proto and // github.com/libp2p/go-libp2p/core/peer/pb/peer_record.proto for message definitions. 
- SignedPeerRecord []byte `protobuf:"bytes,8,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SignedPeerRecord []byte `protobuf:"bytes,8,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"` } -func (m *Identify) Reset() { *m = Identify{} } -func (m *Identify) String() string { return proto.CompactTextString(m) } -func (*Identify) ProtoMessage() {} -func (*Identify) Descriptor() ([]byte, []int) { - return fileDescriptor_83f1e7e6b485409f, []int{1} -} -func (m *Identify) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Identify) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Identify.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Identify) Reset() { + *x = Identify{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_identify_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Identify) XXX_Merge(src proto.Message) { - xxx_messageInfo_Identify.Merge(m, src) -} -func (m *Identify) XXX_Size() int { - return m.Size() -} -func (m *Identify) XXX_DiscardUnknown() { - xxx_messageInfo_Identify.DiscardUnknown(m) + +func (x *Identify) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_Identify proto.InternalMessageInfo +func (*Identify) ProtoMessage() {} -func (m *Identify) GetProtocolVersion() string { - if m != nil && m.ProtocolVersion != nil { - return *m.ProtocolVersion +func (x *Identify) ProtoReflect() protoreflect.Message { + mi := &file_pb_identify_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" 
+ return mi.MessageOf(x) } -func (m *Identify) GetAgentVersion() string { - if m != nil && m.AgentVersion != nil { - return *m.AgentVersion - } - return "" +// Deprecated: Use Identify.ProtoReflect.Descriptor instead. +func (*Identify) Descriptor() ([]byte, []int) { + return file_pb_identify_proto_rawDescGZIP(), []int{0} } -func (m *Identify) GetPublicKey() []byte { - if m != nil { - return m.PublicKey +func (x *Identify) GetProtocolVersion() string { + if x != nil && x.ProtocolVersion != nil { + return *x.ProtocolVersion } - return nil + return "" } -func (m *Identify) GetListenAddrs() [][]byte { - if m != nil { - return m.ListenAddrs +func (x *Identify) GetAgentVersion() string { + if x != nil && x.AgentVersion != nil { + return *x.AgentVersion } - return nil + return "" } -func (m *Identify) GetObservedAddr() []byte { - if m != nil { - return m.ObservedAddr +func (x *Identify) GetPublicKey() []byte { + if x != nil { + return x.PublicKey } return nil } -func (m *Identify) GetProtocols() []string { - if m != nil { - return m.Protocols +func (x *Identify) GetListenAddrs() [][]byte { + if x != nil { + return x.ListenAddrs } return nil } -func (m *Identify) GetDelta() *Delta { - if m != nil { - return m.Delta +func (x *Identify) GetObservedAddr() []byte { + if x != nil { + return x.ObservedAddr } return nil } -func (m *Identify) GetSignedPeerRecord() []byte { - if m != nil { - return m.SignedPeerRecord +func (x *Identify) GetProtocols() []string { + if x != nil { + return x.Protocols } return nil } -func init() { - proto.RegisterType((*Delta)(nil), "identify.pb.Delta") - proto.RegisterType((*Identify)(nil), "identify.pb.Identify") -} - -func init() { proto.RegisterFile("identify.proto", fileDescriptor_83f1e7e6b485409f) } - -var fileDescriptor_83f1e7e6b485409f = []byte{ - // 272 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0x41, 0x4a, 0xc3, 0x40, - 0x14, 0x86, 0x99, 0xd6, 0x6a, 0xf3, 0x12, 0x5a, 0x99, 
0xd5, 0x2c, 0x24, 0xc4, 0x6c, 0x1c, 0x5c, - 0x64, 0xe1, 0x0d, 0x14, 0x37, 0xe2, 0xa6, 0x8c, 0xe0, 0x56, 0x92, 0xbc, 0x67, 0x19, 0x48, 0x33, - 0x65, 0x32, 0x0a, 0xbd, 0x95, 0xc7, 0x70, 0xe9, 0x11, 0x24, 0x27, 0x91, 0x4c, 0x4d, 0x93, 0xea, - 0x72, 0x3e, 0x3e, 0xe6, 0x7f, 0xff, 0x0f, 0x0b, 0x8d, 0x54, 0x3b, 0xfd, 0xba, 0xcb, 0xb6, 0xd6, - 0x38, 0xc3, 0xc3, 0xe1, 0x5d, 0xa4, 0x4f, 0x30, 0xbb, 0xa7, 0xca, 0xe5, 0xfc, 0x0a, 0x96, 0x39, - 0x22, 0xe1, 0x8b, 0x97, 0x4a, 0x53, 0x35, 0x82, 0x25, 0x53, 0x19, 0xa8, 0x85, 0xc7, 0xab, 0x9e, - 0xf2, 0x4b, 0x88, 0xec, 0x66, 0x64, 0x4d, 0xbc, 0x15, 0xda, 0xcd, 0x41, 0x49, 0x3f, 0x26, 0x30, - 0x7f, 0xf8, 0x0d, 0xe1, 0x12, 0x96, 0xbd, 0xfc, 0x4c, 0xb6, 0xd1, 0xa6, 0x16, 0xb3, 0x84, 0xc9, - 0x40, 0xfd, 0xc5, 0x3c, 0x85, 0x28, 0x5f, 0x53, 0xed, 0x7a, 0xed, 0xd4, 0x6b, 0x47, 0x8c, 0x5f, - 0x40, 0xb0, 0x7d, 0x2b, 0x2a, 0x5d, 0x3e, 0xd2, 0x4e, 0xb0, 0x84, 0xc9, 0x48, 0x0d, 0x80, 0x27, - 0x10, 0x56, 0xba, 0x71, 0x54, 0xdf, 0x22, 0xda, 0xfd, 0x69, 0x91, 0x1a, 0xa3, 0x2e, 0xc3, 0x14, - 0x0d, 0xd9, 0x77, 0xc2, 0x0e, 0x88, 0x13, 0xff, 0xc5, 0x11, 0xf3, 0x19, 0x87, 0x7a, 0x53, 0x5f, - 0x6f, 0x00, 0x5c, 0xc2, 0x0c, 0xbb, 0xc5, 0xc4, 0x59, 0xc2, 0x64, 0x78, 0xc3, 0xb3, 0xd1, 0x9c, - 0x99, 0xdf, 0x52, 0xed, 0x05, 0x7e, 0x0d, 0xe7, 0x8d, 0x5e, 0xd7, 0x84, 0x2b, 0x22, 0xab, 0xa8, - 0x34, 0x16, 0xc5, 0xdc, 0xe7, 0xfd, 0xe3, 0x77, 0xd1, 0x67, 0x1b, 0xb3, 0xaf, 0x36, 0x66, 0xdf, - 0x6d, 0xcc, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x03, 0xc8, 0x41, 0xb3, 0x01, 0x00, 0x00, -} - -func (m *Delta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Delta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Delta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil 
{ - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RmProtocols) > 0 { - for iNdEx := len(m.RmProtocols) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RmProtocols[iNdEx]) - copy(dAtA[i:], m.RmProtocols[iNdEx]) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.RmProtocols[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.AddedProtocols) > 0 { - for iNdEx := len(m.AddedProtocols) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AddedProtocols[iNdEx]) - copy(dAtA[i:], m.AddedProtocols[iNdEx]) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.AddedProtocols[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Identify) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Identify) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Identify) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.SignedPeerRecord != nil { - i -= len(m.SignedPeerRecord) - copy(dAtA[i:], m.SignedPeerRecord) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.SignedPeerRecord))) - i-- - dAtA[i] = 0x42 - } - if m.Delta != nil { - { - size, err := m.Delta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIdentify(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.AgentVersion != nil { - i -= len(*m.AgentVersion) - copy(dAtA[i:], *m.AgentVersion) - i = encodeVarintIdentify(dAtA, i, uint64(len(*m.AgentVersion))) - i-- - dAtA[i] = 0x32 - } - if m.ProtocolVersion != nil { - i -= len(*m.ProtocolVersion) - copy(dAtA[i:], *m.ProtocolVersion) - i = encodeVarintIdentify(dAtA, i, uint64(len(*m.ProtocolVersion))) - i-- - dAtA[i] = 
0x2a - } - if m.ObservedAddr != nil { - i -= len(m.ObservedAddr) - copy(dAtA[i:], m.ObservedAddr) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.ObservedAddr))) - i-- - dAtA[i] = 0x22 - } - if len(m.Protocols) > 0 { - for iNdEx := len(m.Protocols) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Protocols[iNdEx]) - copy(dAtA[i:], m.Protocols[iNdEx]) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.Protocols[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.ListenAddrs) > 0 { - for iNdEx := len(m.ListenAddrs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ListenAddrs[iNdEx]) - copy(dAtA[i:], m.ListenAddrs[iNdEx]) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.ListenAddrs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.PublicKey != nil { - i -= len(m.PublicKey) - copy(dAtA[i:], m.PublicKey) - i = encodeVarintIdentify(dAtA, i, uint64(len(m.PublicKey))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintIdentify(dAtA []byte, offset int, v uint64) int { - offset -= sovIdentify(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Delta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AddedProtocols) > 0 { - for _, s := range m.AddedProtocols { - l = len(s) - n += 1 + l + sovIdentify(uint64(l)) - } - } - if len(m.RmProtocols) > 0 { - for _, s := range m.RmProtocols { - l = len(s) - n += 1 + l + sovIdentify(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Identify) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PublicKey != nil { - l = len(m.PublicKey) - n += 1 + l + sovIdentify(uint64(l)) - } - if len(m.ListenAddrs) > 0 { - for _, b := range m.ListenAddrs { - l = len(b) - n += 1 + l + sovIdentify(uint64(l)) - } - } - if len(m.Protocols) > 0 { - for _, s := range m.Protocols { - l = len(s) - n += 1 + l + sovIdentify(uint64(l)) - } - } - 
if m.ObservedAddr != nil { - l = len(m.ObservedAddr) - n += 1 + l + sovIdentify(uint64(l)) - } - if m.ProtocolVersion != nil { - l = len(*m.ProtocolVersion) - n += 1 + l + sovIdentify(uint64(l)) - } - if m.AgentVersion != nil { - l = len(*m.AgentVersion) - n += 1 + l + sovIdentify(uint64(l)) - } - if m.Delta != nil { - l = m.Delta.Size() - n += 1 + l + sovIdentify(uint64(l)) - } - if m.SignedPeerRecord != nil { - l = len(m.SignedPeerRecord) - n += 1 + l + sovIdentify(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovIdentify(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozIdentify(x uint64) (n int) { - return sovIdentify(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Delta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Delta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Delta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AddedProtocols", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.AddedProtocols = append(m.AddedProtocols, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RmProtocols", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RmProtocols = append(m.RmProtocols, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIdentify(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIdentify - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Identify) GetSignedPeerRecord() []byte { + if x != nil { + return x.SignedPeerRecord } return nil } -func (m *Identify) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Identify: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Identify: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...) 
- if m.PublicKey == nil { - m.PublicKey = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListenAddrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ListenAddrs = append(m.ListenAddrs, make([]byte, postIndex-iNdEx)) - copy(m.ListenAddrs[len(m.ListenAddrs)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocols", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocols = append(m.Protocols, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedAddr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + byteLen - if 
postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ObservedAddr = append(m.ObservedAddr[:0], dAtA[iNdEx:postIndex]...) - if m.ObservedAddr == nil { - m.ObservedAddr = []byte{} - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProtocolVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ProtocolVersion = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.AgentVersion = &s - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Delta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Delta == nil { - m.Delta = &Delta{} - } - if err := m.Delta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignedPeerRecord", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIdentify - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthIdentify - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthIdentify - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SignedPeerRecord = append(m.SignedPeerRecord[:0], dAtA[iNdEx:postIndex]...) - if m.SignedPeerRecord == nil { - m.SignedPeerRecord = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIdentify(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIdentify - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipIdentify(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIdentify - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIdentify - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIdentify - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthIdentify - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupIdentify - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthIdentify - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var File_pb_identify_proto protoreflect.FileDescriptor + +var file_pb_identify_proto_rawDesc = []byte{ + 0x0a, 0x11, 0x70, 0x62, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x2e, 0x70, 0x62, + 0x22, 0x86, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x12, 0x28, 0x0a, + 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x6c, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, + 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6f, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0c, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x41, 0x64, 0x64, 0x72, 0x12, + 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x2a, 0x0a, + 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, + 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x50, + 0x65, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, } var ( - ErrInvalidLengthIdentify = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowIdentify = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupIdentify = fmt.Errorf("proto: unexpected end of group") + file_pb_identify_proto_rawDescOnce sync.Once + file_pb_identify_proto_rawDescData = file_pb_identify_proto_rawDesc ) + +func file_pb_identify_proto_rawDescGZIP() []byte { + file_pb_identify_proto_rawDescOnce.Do(func() { + file_pb_identify_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_identify_proto_rawDescData) + }) + return 
file_pb_identify_proto_rawDescData +} + +var file_pb_identify_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_pb_identify_proto_goTypes = []interface{}{ + (*Identify)(nil), // 0: identify.pb.Identify +} +var file_pb_identify_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_pb_identify_proto_init() } +func file_pb_identify_proto_init() { + if File_pb_identify_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_identify_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identify); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_identify_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_identify_proto_goTypes, + DependencyIndexes: file_pb_identify_proto_depIdxs, + MessageInfos: file_pb_identify_proto_msgTypes, + }.Build() + File_pb_identify_proto = out.File + file_pb_identify_proto_rawDesc = nil + file_pb_identify_proto_goTypes = nil + file_pb_identify_proto_depIdxs = nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto index bdb28330..cda102d4 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/pb/identify.proto @@ -2,13 +2,6 @@ syntax = "proto2"; package identify.pb; -message Delta { - // new protocols now serviced by the peer. 
- repeated string added_protocols = 1; - // protocols dropped by the peer. - repeated string rm_protocols = 2; -} - message Identify { // protocolVersion determines compatibility between peers @@ -34,9 +27,6 @@ message Identify { // protocols are the services this node is running repeated string protocols = 3; - // a delta update is incompatible with everything else. If this field is included, none of the others can appear. - optional Delta delta = 7; - // signedPeerRecord contains a serialized SignedEnvelope containing a PeerRecord, // signed by the sending node. It contains the same addresses as the listenAddrs field, but // in a form that lets us share authenticated addrs with other peers. diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go deleted file mode 100644 index af854933..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/peer_loop.go +++ /dev/null @@ -1,264 +0,0 @@ -package identify - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p/core/record" - pb "github.com/libp2p/go-libp2p/p2p/protocol/identify/pb" - - "github.com/libp2p/go-msgio/protoio" - ma "github.com/multiformats/go-multiaddr" -) - -var errProtocolNotSupported = errors.New("protocol not supported") - -type identifySnapshot struct { - protocols []string - addrs []ma.Multiaddr - record *record.Envelope -} - -type peerHandler struct { - ids *idService - - cancel context.CancelFunc - - pid peer.ID - - snapshotMu sync.RWMutex - snapshot *identifySnapshot - - pushCh chan struct{} - deltaCh chan struct{} -} - -func newPeerHandler(pid peer.ID, ids *idService) *peerHandler { - ph := &peerHandler{ - ids: ids, - pid: pid, - - snapshot: ids.getSnapshot(), - - pushCh: make(chan struct{}, 1), - deltaCh: 
make(chan struct{}, 1), - } - - return ph -} - -// start starts a handler. This may only be called on a stopped handler, and must -// not be called concurrently with start/stop. -// -// This may _not_ be called on a _canceled_ handler. I.e., a handler where the -// passed in context expired. -func (ph *peerHandler) start(ctx context.Context, onExit func()) { - if ph.cancel != nil { - // If this happens, we have a bug. It means we tried to start - // before we stopped. - panic("peer handler already running") - } - - ctx, cancel := context.WithCancel(ctx) - ph.cancel = cancel - - go ph.loop(ctx, onExit) -} - -// stop stops a handler. This may not be called concurrently with any -// other calls to stop/start. -func (ph *peerHandler) stop() error { - if ph.cancel != nil { - ph.cancel() - ph.cancel = nil - } - return nil -} - -// per peer loop for pushing updates -func (ph *peerHandler) loop(ctx context.Context, onExit func()) { - defer onExit() - - for { - select { - // our listen addresses have changed, send an IDPush. - case <-ph.pushCh: - if err := ph.sendPush(ctx); err != nil { - log.Warnw("failed to send Identify Push", "peer", ph.pid, "error", err) - } - - case <-ph.deltaCh: - if err := ph.sendDelta(ctx); err != nil { - log.Warnw("failed to send Identify Delta", "peer", ph.pid, "error", err) - } - - case <-ctx.Done(): - return - } - } -} - -func (ph *peerHandler) sendDelta(ctx context.Context) error { - // send a push if the peer does not support the Delta protocol. - if !ph.peerSupportsProtos(ctx, []string{IDDelta}) { - log.Debugw("will send push as peer does not support delta", "peer", ph.pid) - if err := ph.sendPush(ctx); err != nil { - return fmt.Errorf("failed to send push on delta message: %w", err) - } - return nil - } - - // extract a delta message, updating the last state. 
- mes := ph.nextDelta() - if mes == nil || (len(mes.AddedProtocols) == 0 && len(mes.RmProtocols) == 0) { - return nil - } - - ds, err := ph.openStream(ctx, []string{IDDelta}) - if err != nil { - return fmt.Errorf("failed to open delta stream: %w", err) - } - - defer ds.Close() - - c := ds.Conn() - if err := protoio.NewDelimitedWriter(ds).WriteMsg(&pb.Identify{Delta: mes}); err != nil { - _ = ds.Reset() - return fmt.Errorf("failed to send delta message, %w", err) - } - log.Debugw("sent identify update", "protocol", ds.Protocol(), "peer", c.RemotePeer(), - "peer address", c.RemoteMultiaddr()) - - return nil -} - -func (ph *peerHandler) sendPush(ctx context.Context) error { - dp, err := ph.openStream(ctx, []string{IDPush}) - if err == errProtocolNotSupported { - log.Debugw("not sending push as peer does not support protocol", "peer", ph.pid) - return nil - } - if err != nil { - return fmt.Errorf("failed to open push stream: %w", err) - } - defer dp.Close() - - snapshot := ph.ids.getSnapshot() - ph.snapshotMu.Lock() - ph.snapshot = snapshot - ph.snapshotMu.Unlock() - if err := ph.ids.writeChunkedIdentifyMsg(dp.Conn(), snapshot, dp); err != nil { - _ = dp.Reset() - return fmt.Errorf("failed to send push message: %w", err) - } - - return nil -} - -func (ph *peerHandler) openStream(ctx context.Context, protos []string) (network.Stream, error) { - // wait for the other peer to send us an Identify response on "all" connections we have with it - // so we can look at it's supported protocols and avoid a multistream-select roundtrip to negotiate the protocol - // if we know for a fact that it dosen't support the protocol. 
- conns := ph.ids.Host.Network().ConnsToPeer(ph.pid) - for _, c := range conns { - select { - case <-ph.ids.IdentifyWait(c): - case <-ctx.Done(): - return nil, ctx.Err() - } - } - - if !ph.peerSupportsProtos(ctx, protos) { - return nil, errProtocolNotSupported - } - - ph.ids.pushSemaphore <- struct{}{} - defer func() { - <-ph.ids.pushSemaphore - }() - - // negotiate a stream without opening a new connection as we "should" already have a connection. - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - ctx = network.WithNoDial(ctx, "should already have connection") - - // newstream will open a stream on the first protocol the remote peer supports from the among - // the list of protocols passed to it. - s, err := ph.ids.Host.NewStream(ctx, ph.pid, protocol.ConvertFromStrings(protos)...) - if err != nil { - return nil, err - } - - return s, err -} - -// returns true if the peer supports atleast one of the given protocols -func (ph *peerHandler) peerSupportsProtos(ctx context.Context, protos []string) bool { - conns := ph.ids.Host.Network().ConnsToPeer(ph.pid) - for _, c := range conns { - select { - case <-ph.ids.IdentifyWait(c): - case <-ctx.Done(): - return false - } - } - - pstore := ph.ids.Host.Peerstore() - - if sup, err := pstore.SupportsProtocols(ph.pid, protos...); err == nil && len(sup) == 0 { - return false - } - return true -} - -func (ph *peerHandler) nextDelta() *pb.Delta { - curr := ph.ids.Host.Mux().Protocols() - - // Extract the old protocol list and replace the old snapshot with an - // updated one. 
- ph.snapshotMu.Lock() - snapshot := *ph.snapshot - old := snapshot.protocols - snapshot.protocols = curr - ph.snapshot = &snapshot - ph.snapshotMu.Unlock() - - oldProtos := make(map[string]struct{}, len(old)) - currProtos := make(map[string]struct{}, len(curr)) - - for _, proto := range old { - oldProtos[proto] = struct{}{} - } - - for _, proto := range curr { - currProtos[proto] = struct{}{} - } - - var added []string - var removed []string - - // has it been added ? - for p := range currProtos { - if _, ok := oldProtos[p]; !ok { - added = append(added, p) - } - } - - // has it been removed ? - for p := range oldProtos { - if _, ok := currProtos[p]; !ok { - removed = append(removed, p) - } - } - - return &pb.Delta{ - AddedProtocols: added, - RmProtocols: removed, - } -} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go118.go b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/user_agent.go similarity index 97% rename from vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go118.go rename to vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/user_agent.go index f934dc66..016f941f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/id_go118.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/protocol/identify/user_agent.go @@ -1,5 +1,3 @@ -//go:build go1.18 - package identify import ( diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go index 4dd481d6..e1a18e9b 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/handshake.go @@ -15,11 +15,13 @@ import ( "github.com/libp2p/go-libp2p/p2p/security/noise/pb" "github.com/flynn/noise" - "github.com/gogo/protobuf/proto" pool "github.com/libp2p/go-buffer-pool" "github.com/minio/sha256-simd" + "google.golang.org/protobuf/proto" ) +//go:generate protoc --go_out=. 
--go_opt=Mpb/payload.proto=./pb pb/payload.proto + // payloadSigPrefix is prepended to our Noise static key before signing with // our libp2p identity key. const payloadSigPrefix = "noise-libp2p-static-key:" diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile deleted file mode 100644 index 7cf8222f..00000000 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc --proto_path=$(PWD):$(PWD)/../.. --gogofaster_out=. $< - -clean: - rm -f *.pb.go - rm -f *.go diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go index fdebe487..8e3a805a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/pb/payload.pb.go @@ -1,664 +1,239 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: payload.proto +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: pb/payload.proto package pb import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type NoiseExtensions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + WebtransportCerthashes [][]byte `protobuf:"bytes,1,rep,name=webtransport_certhashes,json=webtransportCerthashes" json:"webtransport_certhashes,omitempty"` StreamMuxers []string `protobuf:"bytes,2,rep,name=stream_muxers,json=streamMuxers" json:"stream_muxers,omitempty"` } -func (m *NoiseExtensions) Reset() { *m = NoiseExtensions{} } -func (m *NoiseExtensions) String() string { return proto.CompactTextString(m) } -func (*NoiseExtensions) ProtoMessage() {} -func (*NoiseExtensions) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{0} -} -func (m *NoiseExtensions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NoiseExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NoiseExtensions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NoiseExtensions) XXX_Merge(src proto.Message) { - xxx_messageInfo_NoiseExtensions.Merge(m, src) -} -func (m *NoiseExtensions) XXX_Size() int { - return m.Size() -} -func (m *NoiseExtensions) XXX_DiscardUnknown() { - xxx_messageInfo_NoiseExtensions.DiscardUnknown(m) -} - -var xxx_messageInfo_NoiseExtensions proto.InternalMessageInfo - -func (m *NoiseExtensions) GetWebtransportCerthashes() [][]byte { - if m != nil { - return m.WebtransportCerthashes +func (x *NoiseExtensions) Reset() { + *x = NoiseExtensions{} + if protoimpl.UnsafeEnabled { + mi := 
&file_pb_payload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *NoiseExtensions) GetStreamMuxers() []string { - if m != nil { - return m.StreamMuxers - } - return nil +func (x *NoiseExtensions) String() string { + return protoimpl.X.MessageStringOf(x) } -type NoiseHandshakePayload struct { - IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey" json:"identity_key"` - IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig" json:"identity_sig"` - Extensions *NoiseExtensions `protobuf:"bytes,4,opt,name=extensions" json:"extensions,omitempty"` -} +func (*NoiseExtensions) ProtoMessage() {} -func (m *NoiseHandshakePayload) Reset() { *m = NoiseHandshakePayload{} } -func (m *NoiseHandshakePayload) String() string { return proto.CompactTextString(m) } -func (*NoiseHandshakePayload) ProtoMessage() {} -func (*NoiseHandshakePayload) Descriptor() ([]byte, []int) { - return fileDescriptor_678c914f1bee6d56, []int{1} -} -func (m *NoiseHandshakePayload) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NoiseHandshakePayload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NoiseHandshakePayload.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *NoiseExtensions) ProtoReflect() protoreflect.Message { + mi := &file_pb_payload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *NoiseHandshakePayload) XXX_Merge(src proto.Message) { - xxx_messageInfo_NoiseHandshakePayload.Merge(m, src) -} -func (m *NoiseHandshakePayload) XXX_Size() int { - return m.Size() -} -func (m *NoiseHandshakePayload) XXX_DiscardUnknown() { - 
xxx_messageInfo_NoiseHandshakePayload.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_NoiseHandshakePayload proto.InternalMessageInfo - -func (m *NoiseHandshakePayload) GetIdentityKey() []byte { - if m != nil { - return m.IdentityKey - } - return nil +// Deprecated: Use NoiseExtensions.ProtoReflect.Descriptor instead. +func (*NoiseExtensions) Descriptor() ([]byte, []int) { + return file_pb_payload_proto_rawDescGZIP(), []int{0} } -func (m *NoiseHandshakePayload) GetIdentitySig() []byte { - if m != nil { - return m.IdentitySig +func (x *NoiseExtensions) GetWebtransportCerthashes() [][]byte { + if x != nil { + return x.WebtransportCerthashes } return nil } -func (m *NoiseHandshakePayload) GetExtensions() *NoiseExtensions { - if m != nil { - return m.Extensions +func (x *NoiseExtensions) GetStreamMuxers() []string { + if x != nil { + return x.StreamMuxers } return nil } -func init() { - proto.RegisterType((*NoiseExtensions)(nil), "pb.NoiseExtensions") - proto.RegisterType((*NoiseHandshakePayload)(nil), "pb.NoiseHandshakePayload") -} - -func init() { proto.RegisterFile("payload.proto", fileDescriptor_678c914f1bee6d56) } - -var fileDescriptor_678c914f1bee6d56 = []byte{ - // 251 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x48, 0xac, 0xcc, - 0xc9, 0x4f, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2a, 0x48, 0x52, 0xca, 0xe7, - 0xe2, 0xf7, 0xcb, 0xcf, 0x2c, 0x4e, 0x75, 0xad, 0x28, 0x49, 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x2b, - 0x16, 0x32, 0xe7, 0x12, 0x2f, 0x4f, 0x4d, 0x2a, 0x29, 0x4a, 0xcc, 0x2b, 0x2e, 0xc8, 0x2f, 0x2a, - 0x89, 0x4f, 0x4e, 0x2d, 0x2a, 0xc9, 0x48, 0x2c, 0xce, 0x48, 0x2d, 0x96, 0x60, 0x54, 0x60, 0xd6, - 0xe0, 0x09, 0x12, 0x43, 0x96, 0x76, 0x86, 0xcb, 0x0a, 0x29, 0x73, 0xf1, 0x16, 0x97, 0x14, 0xa5, - 0x26, 0xe6, 0xc6, 0xe7, 0x96, 0x56, 0xa4, 0x16, 0x15, 0x4b, 0x30, 0x29, 0x30, 0x6b, 0x70, 0x06, - 0xf1, 0x40, 0x04, 0x7d, 0xc1, 0x62, 0x4a, 0xf3, 0x18, 0xb9, 
0x44, 0xc1, 0x36, 0x7a, 0x24, 0xe6, - 0xa5, 0x14, 0x67, 0x24, 0x66, 0xa7, 0x06, 0x40, 0x1c, 0x25, 0xa4, 0xce, 0xc5, 0x93, 0x99, 0x92, - 0x9a, 0x57, 0x92, 0x59, 0x52, 0x19, 0x9f, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe3, - 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x37, 0x4c, 0xc6, 0x3b, 0xb5, 0x12, 0x45, 0x61, 0x71, - 0x66, 0xba, 0x04, 0x13, 0x36, 0x85, 0xc1, 0x99, 0xe9, 0x42, 0xc6, 0x5c, 0x5c, 0xa9, 0x70, 0x7f, - 0x49, 0xb0, 0x28, 0x30, 0x6a, 0x70, 0x1b, 0x09, 0xeb, 0x15, 0x24, 0xe9, 0xa1, 0x79, 0x39, 0x08, - 0x49, 0x99, 0x93, 0xc4, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, - 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x00, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x02, 0xdb, 0x23, 0xb3, 0x3f, 0x01, 0x00, 0x00, -} - -func (m *NoiseExtensions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type NoiseHandshakePayload struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *NoiseExtensions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + IdentityKey []byte `protobuf:"bytes,1,opt,name=identity_key,json=identityKey" json:"identity_key,omitempty"` + IdentitySig []byte `protobuf:"bytes,2,opt,name=identity_sig,json=identitySig" json:"identity_sig,omitempty"` + Extensions *NoiseExtensions `protobuf:"bytes,4,opt,name=extensions" json:"extensions,omitempty"` } -func (m *NoiseExtensions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.StreamMuxers) > 0 { - for iNdEx := len(m.StreamMuxers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.StreamMuxers[iNdEx]) - copy(dAtA[i:], m.StreamMuxers[iNdEx]) - i = encodeVarintPayload(dAtA, i, uint64(len(m.StreamMuxers[iNdEx]))) - i-- - dAtA[i] = 0x12 - } 
+func (x *NoiseHandshakePayload) Reset() { + *x = NoiseHandshakePayload{} + if protoimpl.UnsafeEnabled { + mi := &file_pb_payload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.WebtransportCerthashes) > 0 { - for iNdEx := len(m.WebtransportCerthashes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.WebtransportCerthashes[iNdEx]) - copy(dAtA[i:], m.WebtransportCerthashes[iNdEx]) - i = encodeVarintPayload(dAtA, i, uint64(len(m.WebtransportCerthashes[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil } -func (m *NoiseHandshakePayload) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *NoiseHandshakePayload) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NoiseHandshakePayload) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*NoiseHandshakePayload) ProtoMessage() {} -func (m *NoiseHandshakePayload) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Extensions != nil { - { - size, err := m.Extensions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPayload(dAtA, i, uint64(size)) +func (x *NoiseHandshakePayload) ProtoReflect() protoreflect.Message { + mi := &file_pb_payload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0x22 - } - if m.IdentitySig != nil { - i -= len(m.IdentitySig) - copy(dAtA[i:], m.IdentitySig) - i = encodeVarintPayload(dAtA, i, uint64(len(m.IdentitySig))) - i-- - dAtA[i] = 0x12 + return ms } - if m.IdentityKey != nil { - i -= len(m.IdentityKey) - copy(dAtA[i:], m.IdentityKey) - i = 
encodeVarintPayload(dAtA, i, uint64(len(m.IdentityKey))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func encodeVarintPayload(dAtA []byte, offset int, v uint64) int { - offset -= sovPayload(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *NoiseExtensions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.WebtransportCerthashes) > 0 { - for _, b := range m.WebtransportCerthashes { - l = len(b) - n += 1 + l + sovPayload(uint64(l)) - } - } - if len(m.StreamMuxers) > 0 { - for _, s := range m.StreamMuxers { - l = len(s) - n += 1 + l + sovPayload(uint64(l)) - } - } - return n +// Deprecated: Use NoiseHandshakePayload.ProtoReflect.Descriptor instead. +func (*NoiseHandshakePayload) Descriptor() ([]byte, []int) { + return file_pb_payload_proto_rawDescGZIP(), []int{1} } -func (m *NoiseHandshakePayload) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IdentityKey != nil { - l = len(m.IdentityKey) - n += 1 + l + sovPayload(uint64(l)) +func (x *NoiseHandshakePayload) GetIdentityKey() []byte { + if x != nil { + return x.IdentityKey } - if m.IdentitySig != nil { - l = len(m.IdentitySig) - n += 1 + l + sovPayload(uint64(l)) - } - if m.Extensions != nil { - l = m.Extensions.Size() - n += 1 + l + sovPayload(uint64(l)) - } - return n -} - -func sovPayload(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPayload(x uint64) (n int) { - return sovPayload(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *NoiseExtensions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NoiseExtensions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NoiseExtensions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WebtransportCerthashes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WebtransportCerthashes = append(m.WebtransportCerthashes, make([]byte, postIndex-iNdEx)) - copy(m.WebtransportCerthashes[len(m.WebtransportCerthashes)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamMuxers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StreamMuxers = append(m.StreamMuxers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthPayload - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *NoiseHandshakePayload) GetIdentitySig() []byte { + if x != nil { + return x.IdentitySig } return nil } -func (m *NoiseHandshakePayload) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NoiseHandshakePayload: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NoiseHandshakePayload: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IdentityKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IdentityKey = append(m.IdentityKey[:0], dAtA[iNdEx:postIndex]...) 
- if m.IdentityKey == nil { - m.IdentityKey = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IdentitySig", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IdentitySig = append(m.IdentitySig[:0], dAtA[iNdEx:postIndex]...) - if m.IdentitySig == nil { - m.IdentitySig = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPayload - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPayload - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPayload - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Extensions == nil { - m.Extensions = &NoiseExtensions{} - } - if err := m.Extensions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPayload(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPayload - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *NoiseHandshakePayload) GetExtensions() *NoiseExtensions { + if x != nil { + return x.Extensions } return nil } -func 
skipPayload(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPayload - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPayload - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPayload - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPayload - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_pb_payload_proto protoreflect.FileDescriptor + +var file_pb_payload_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x70, 0x62, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0x6f, 0x0a, 0x0f, 0x4e, 0x6f, 0x69, 0x73, 0x65, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x17, 0x77, 0x65, 0x62, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x68, 0x61, + 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x16, 0x77, 0x65, 0x62, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 
0x43, 0x65, 0x72, 0x74, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x6d, 0x75, 0x78, + 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x4d, 0x75, 0x78, 0x65, 0x72, 0x73, 0x22, 0x92, 0x01, 0x0a, 0x15, 0x4e, 0x6f, 0x69, 0x73, + 0x65, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x5f, 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x53, 0x69, 0x67, 0x12, 0x33, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, + 0x2e, 0x4e, 0x6f, 0x69, 0x73, 0x65, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, } var ( - ErrInvalidLengthPayload = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPayload = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPayload = fmt.Errorf("proto: unexpected end of group") + file_pb_payload_proto_rawDescOnce sync.Once + file_pb_payload_proto_rawDescData = file_pb_payload_proto_rawDesc ) + +func file_pb_payload_proto_rawDescGZIP() []byte { + file_pb_payload_proto_rawDescOnce.Do(func() { + file_pb_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_pb_payload_proto_rawDescData) + }) + return file_pb_payload_proto_rawDescData +} + +var file_pb_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pb_payload_proto_goTypes = []interface{}{ + (*NoiseExtensions)(nil), // 0: pb.NoiseExtensions + 
(*NoiseHandshakePayload)(nil), // 1: pb.NoiseHandshakePayload +} +var file_pb_payload_proto_depIdxs = []int32{ + 0, // 0: pb.NoiseHandshakePayload.extensions:type_name -> pb.NoiseExtensions + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_pb_payload_proto_init() } +func file_pb_payload_proto_init() { + if File_pb_payload_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pb_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NoiseExtensions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pb_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NoiseHandshakePayload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pb_payload_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_pb_payload_proto_goTypes, + DependencyIndexes: file_pb_payload_proto_depIdxs, + MessageInfos: file_pb_payload_proto_msgTypes, + }.Build() + File_pb_payload_proto = out.File + file_pb_payload_proto_rawDesc = nil + file_pb_payload_proto_goTypes = nil + file_pb_payload_proto_depIdxs = nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go index 93ce5217..fa32ab8f 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go +++ 
b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/session.go @@ -12,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" ) type secureSession struct { @@ -94,10 +95,6 @@ func (s *secureSession) LocalPeer() peer.ID { return s.localID } -func (s *secureSession) LocalPrivateKey() crypto.PrivKey { - return s.localKey -} - func (s *secureSession) LocalPublicKey() crypto.PubKey { return s.localKey.GetPublic() } @@ -134,9 +131,10 @@ func (s *secureSession) Close() error { return s.insecureConn.Close() } -func SessionWithConnState(s *secureSession, muxer string) *secureSession { +func SessionWithConnState(s *secureSession, muxer protocol.ID) *secureSession { if s != nil { s.connectionState.StreamMultiplexer = muxer + s.connectionState.UsedEarlyMuxerNegotiation = muxer != "" } return s } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go index 6e2882b8..e42cea1b 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/noise/transport.go @@ -23,7 +23,7 @@ type Transport struct { protocolID protocol.ID localID peer.ID privateKey crypto.PrivKey - muxers []string + muxers []protocol.ID } var _ sec.SecureTransport = &Transport{} @@ -36,16 +36,16 @@ func New(id protocol.ID, privkey crypto.PrivKey, muxers []tptu.StreamMuxer) (*Tr return nil, err } - smuxers := make([]string, 0, len(muxers)) + muxerIDs := make([]protocol.ID, 0, len(muxers)) for _, m := range muxers { - smuxers = append(smuxers, string(m.ID)) + muxerIDs = append(muxerIDs, m.ID) } return &Transport{ protocolID: id, localID: localID, privateKey: privkey, - muxers: smuxers, + muxers: muxerIDs, }, nil } @@ -87,7 +87,7 @@ func (t *Transport) ID() protocol.ID { return t.protocolID } -func 
matchMuxers(initiatorMuxers, responderMuxers []string) string { +func matchMuxers(initiatorMuxers, responderMuxers []protocol.ID) protocol.ID { for _, initMuxer := range initiatorMuxers { for _, respMuxer := range responderMuxers { if initMuxer == respMuxer { @@ -100,7 +100,7 @@ func matchMuxers(initiatorMuxers, responderMuxers []string) string { type transportEarlyDataHandler struct { transport *Transport - receivedMuxers []string + receivedMuxers []protocol.ID } var _ EarlyDataHandler = &transportEarlyDataHandler{} @@ -111,19 +111,19 @@ func newTransportEDH(t *Transport) *transportEarlyDataHandler { func (i *transportEarlyDataHandler) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions { return &pb.NoiseExtensions{ - StreamMuxers: i.transport.muxers, + StreamMuxers: protocol.ConvertToStrings(i.transport.muxers), } } func (i *transportEarlyDataHandler) Received(_ context.Context, _ net.Conn, extension *pb.NoiseExtensions) error { // Discard messages with size or the number of protocols exceeding extension limit for security. 
if extension != nil && len(extension.StreamMuxers) <= maxProtoNum { - i.receivedMuxers = extension.GetStreamMuxers() + i.receivedMuxers = protocol.ConvertFromStrings(extension.GetStreamMuxers()) } return nil } -func (i *transportEarlyDataHandler) MatchMuxers(isInitiator bool) string { +func (i *transportEarlyDataHandler) MatchMuxers(isInitiator bool) protocol.ID { if isInitiator { return matchMuxers(i.transport.muxers, i.receivedMuxers) } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go index 3ebc7aef..143da392 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/conn.go @@ -12,9 +12,7 @@ import ( type conn struct { *tls.Conn - localPeer peer.ID - privKey ci.PrivKey - + localPeer peer.ID remotePeer peer.ID remotePubKey ci.PubKey connectionState network.ConnectionState @@ -26,10 +24,6 @@ func (c *conn) LocalPeer() peer.ID { return c.localPeer } -func (c *conn) LocalPrivateKey() ci.PrivKey { - return c.privKey -} - func (c *conn) RemotePeer() peer.ID { return c.remotePeer } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go index b036bf89..7c28efe3 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/security/tls/transport.go @@ -166,12 +166,14 @@ func (t *Transport) setupConn(tlsConn *tls.Conn, remotePubKey ci.PubKey) (sec.Se } return &conn{ - Conn: tlsConn, - localPeer: t.localPeer, - privKey: t.privKey, - remotePeer: remotePeerID, - remotePubKey: remotePubKey, - connectionState: network.ConnectionState{StreamMultiplexer: nextProto}, + Conn: tlsConn, + localPeer: t.localPeer, + remotePeer: remotePeerID, + remotePubKey: remotePubKey, + connectionState: network.ConnectionState{ + StreamMultiplexer: protocol.ID(nextProto), + 
UsedEarlyMuxerNegotiation: nextProto != "", + }, }, nil } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go index 999615ce..a2da81eb 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/conn.go @@ -8,8 +8,8 @@ import ( "github.com/libp2p/go-libp2p/core/peer" tpt "github.com/libp2p/go-libp2p/core/transport" - "github.com/lucas-clemente/quic-go" ma "github.com/multiformats/go-multiaddr" + "github.com/quic-go/quic-go" ) type conn struct { @@ -18,7 +18,6 @@ type conn struct { scope network.ConnManagementScope localPeer peer.ID - privKey ic.PrivKey localMultiaddr ma.Multiaddr remotePeerID peer.ID @@ -32,8 +31,12 @@ var _ tpt.CapableConn = &conn{} // It must be called even if the peer closed the connection in order for // garbage collection to properly work in this package. func (c *conn) Close() error { + return c.closeWithError(0, "") +} + +func (c *conn) closeWithError(errCode quic.ApplicationErrorCode, errString string) error { c.transport.removeConn(c.quicConn) - err := c.quicConn.CloseWithError(0, "") + err := c.quicConn.CloseWithError(errCode, errString) c.scope.Done() return err } @@ -62,9 +65,6 @@ func (c *conn) AcceptStream() (network.MuxedStream, error) { // LocalPeer returns our peer ID func (c *conn) LocalPeer() peer.ID { return c.localPeer } -// LocalPrivateKey returns our private key -func (c *conn) LocalPrivateKey() ic.PrivKey { return c.privKey } - // RemotePeer returns the peer ID of the remote peer. 
func (c *conn) RemotePeer() peer.ID { return c.remotePeerID } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go index ea6b68bd..73bb5026 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/listener.go @@ -12,8 +12,8 @@ import ( p2ptls "github.com/libp2p/go-libp2p/p2p/security/tls" "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" - "github.com/lucas-clemente/quic-go" ma "github.com/multiformats/go-multiaddr" + "github.com/quic-go/quic-go" ) // A listener listens for QUIC connections. @@ -56,15 +56,13 @@ func (l *listener) Accept() (tpt.CapableConn, error) { } c, err := l.setupConn(qconn) if err != nil { - qconn.CloseWithError(1, err.Error()) continue } + l.transport.addConn(qconn, c) if l.transport.gater != nil && !(l.transport.gater.InterceptAccept(c) && l.transport.gater.InterceptSecured(network.DirInbound, c.remotePeerID, c)) { - c.scope.Done() - qconn.CloseWithError(errorCodeConnectionGating, "connection gated") + c.closeWithError(errorCodeConnectionGating, "connection gated") continue } - l.transport.addConn(qconn, c) // return through active hole punching if any key := holePunchKey{addr: qconn.RemoteAddr().String(), peer: c.remotePeerID} @@ -95,23 +93,32 @@ func (l *listener) setupConn(qconn quic.Connection) (*conn, error) { log.Debugw("resource manager blocked incoming connection", "addr", qconn.RemoteAddr(), "error", err) return nil, err } + c, err := l.setupConnWithScope(qconn, connScope, remoteMultiaddr) + if err != nil { + connScope.Done() + qconn.CloseWithError(1, "") + return nil, err + } + + return c, nil +} + +func (l *listener) setupConnWithScope(qconn quic.Connection, connScope network.ConnManagementScope, remoteMultiaddr ma.Multiaddr) (*conn, error) { + // The tls.Config used to establish this connection already verified the certificate chain. 
// Since we don't have any way of knowing which tls.Config was used though, // we have to re-determine the peer's identity here. // Therefore, this is expected to never fail. remotePubKey, err := p2ptls.PubKeyFromCertChain(qconn.ConnectionState().TLS.PeerCertificates) if err != nil { - connScope.Done() return nil, err } remotePeerID, err := peer.IDFromPublicKey(remotePubKey) if err != nil { - connScope.Done() return nil, err } if err := connScope.SetPeer(remotePeerID); err != nil { log.Debugw("resource manager blocked incoming connection for peer", "peer", remotePeerID, "addr", qconn.RemoteAddr(), "error", err) - connScope.Done() return nil, err } @@ -126,7 +133,6 @@ func (l *listener) setupConn(qconn quic.Connection) (*conn, error) { scope: connScope, localPeer: l.localPeer, localMultiaddr: localMultiaddr, - privKey: l.privKey, remoteMultiaddr: remoteMultiaddr, remotePeerID: remotePeerID, remotePubKey: remotePubKey, diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go index 5d276dab..56f12dad 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/stream.go @@ -5,7 +5,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" - "github.com/lucas-clemente/quic-go" + "github.com/quic-go/quic-go" ) const ( diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go index 32547197..f279aed7 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/transport.go @@ -10,8 +10,6 @@ import ( "sync" "time" - manet "github.com/multiformats/go-multiaddr/net" - "github.com/libp2p/go-libp2p/core/connmgr" ic "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/network" @@ -21,11 +19,11 @@ import ( p2ptls 
"github.com/libp2p/go-libp2p/p2p/security/tls" "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + logging "github.com/ipfs/go-log/v2" ma "github.com/multiformats/go-multiaddr" mafmt "github.com/multiformats/go-multiaddr-fmt" - - logging "github.com/ipfs/go-log/v2" - "github.com/lucas-clemente/quic-go" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/quic-go/quic-go" ) var log = logging.Logger("quic-transport") @@ -48,6 +46,9 @@ type transport struct { holePunchingMx sync.Mutex holePunching map[holePunchKey]*activeHolePunch + rndMx sync.Mutex + rnd rand.Rand + connMx sync.Mutex conns map[quic.Connection]*conn @@ -96,14 +97,14 @@ func NewTransport(key ic.PrivKey, connManager *quicreuse.ConnManager, psk pnet.P rcmgr: rcmgr, conns: make(map[quic.Connection]*conn), holePunching: make(map[holePunchKey]*activeHolePunch), + rnd: *rand.New(rand.NewSource(time.Now().UnixNano())), listeners: make(map[string][]*virtualListener), }, nil } // Dial dials a new QUIC connection -func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) { - tlsConf, keyCh := t.identity.ConfigForPeer(p) +func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (_c tpt.CapableConn, _err error) { if ok, isClient, _ := network.GetSimultaneousConnect(ctx); ok && !isClient { return t.holePunch(ctx, raddr, p) } @@ -113,11 +114,22 @@ func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tp log.Debugw("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err) return nil, err } + + c, err := t.dialWithScope(ctx, raddr, p, scope) + if err != nil { + scope.Done() + return nil, err + } + return c, nil +} + +func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (tpt.CapableConn, error) { if err := scope.SetPeer(p); err != nil { log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, 
"addr", raddr, "error", err) - scope.Done() return nil, err } + + tlsConf, keyCh := t.identity.ConfigForPeer(p) pconn, err := t.connManager.DialQUIC(ctx, raddr, tlsConf, t.allowWindowIncrease) if err != nil { return nil, err @@ -131,7 +143,6 @@ func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tp } if remotePubKey == nil { pconn.CloseWithError(1, "") - scope.Done() return nil, errors.New("p2p/transport/quic BUG: expected remote pub key to be set") } @@ -144,7 +155,6 @@ func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tp quicConn: pconn, transport: t, scope: scope, - privKey: t.privKey, localPeer: t.localPeer, localMultiaddr: localMultiaddr, remotePubKey: remotePubKey, @@ -210,7 +220,10 @@ func (t *transport) holePunch(ctx context.Context, raddr ma.Multiaddr, p peer.ID var punchErr error loop: for i := 0; ; i++ { - if _, err := rand.Read(payload); err != nil { + t.rndMx.Lock() + _, err := t.rnd.Read(payload) + t.rndMx.Unlock() + if err != nil { punchErr = err break } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go index a95b857d..36c11c66 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quic/virtuallistener.go @@ -6,8 +6,9 @@ import ( tpt "github.com/libp2p/go-libp2p/core/transport" "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" - "github.com/lucas-clemente/quic-go" + ma "github.com/multiformats/go-multiaddr" + "github.com/quic-go/quic-go" ) const acceptBufferPerVersion = 4 @@ -45,8 +46,9 @@ type acceptVal struct { type acceptLoopRunner struct { acceptSem chan struct{} - muxerMu sync.Mutex - muxer map[quic.VersionNumber]chan acceptVal + muxerMu sync.Mutex + muxer map[quic.VersionNumber]chan acceptVal + muxerClosed bool } func (r *acceptLoopRunner) AcceptForVersion(v quic.VersionNumber) chan acceptVal { 
@@ -67,6 +69,11 @@ func (r *acceptLoopRunner) RmAcceptForVersion(v quic.VersionNumber) { r.muxerMu.Lock() defer r.muxerMu.Unlock() + if r.muxerClosed { + // Already closed, all versions are removed + return + } + ch, ok := r.muxer[v] if !ok { panic("expected chan in accept muxer") @@ -78,6 +85,7 @@ func (r *acceptLoopRunner) RmAcceptForVersion(v quic.VersionNumber) { func (r *acceptLoopRunner) sendErrAndClose(err error) { r.muxerMu.Lock() defer r.muxerMu.Unlock() + r.muxerClosed = true for k, ch := range r.muxer { select { case ch <- acceptVal{err: err}: @@ -145,13 +153,23 @@ func (r *acceptLoopRunner) innerAccept(l *listener, expectedVersion quic.Version func (r *acceptLoopRunner) Accept(l *listener, expectedVersion quic.VersionNumber, bufferedConnChan chan acceptVal) (tpt.CapableConn, error) { for { - r.acceptSem <- struct{}{} - conn, err := r.innerAccept(l, expectedVersion, bufferedConnChan) - <-r.acceptSem - - if conn == nil && err == nil { - // Didn't find a conn for the expected version and there was no error, lets try again - continue + var conn tpt.CapableConn + var err error + select { + case r.acceptSem <- struct{}{}: + conn, err = r.innerAccept(l, expectedVersion, bufferedConnChan) + <-r.acceptSem + + if conn == nil && err == nil { + // Didn't find a conn for the expected version and there was no error, lets try again + continue + } + case v, ok := <-bufferedConnChan: + if !ok { + return nil, errors.New("listener closed") + } + conn = v.conn + err = v.err } return conn, err } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go index 1bd90821..76a2c8cc 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/config.go @@ -4,7 +4,7 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go" + "github.com/quic-go/quic-go" ) var quicConfig = &quic.Config{ @@ -20,4 
+20,6 @@ var quicConfig = &quic.Config{ Versions: []quic.VersionNumber{quic.VersionDraft29, quic.Version1}, // We don't use datagrams (yet), but this is necessary for WebTransport EnableDatagrams: true, + // The multiaddress encodes the QUIC version, thus there's no need to send Version Negotiation packets. + DisableVersionNegotiationPackets: true, } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go index 36fdb582..0e2793ee 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/connmgr.go @@ -7,10 +7,10 @@ import ( "net" "sync" - "github.com/lucas-clemente/quic-go" - quiclogging "github.com/lucas-clemente/quic-go/logging" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + "github.com/quic-go/quic-go" + quiclogging "github.com/quic-go/quic-go/logging" ) var quicDialContext = quic.DialContext // so we can mock it in tests @@ -54,7 +54,7 @@ func NewConnManager(statelessResetKey quic.StatelessResetKey, opts ...Option) (* tracers = append(tracers, qlogTracer) } if cm.enableMetrics { - tracers = append(tracers, &metricsTracer{}) + tracers = append(tracers, newMetricsTracer()) } if len(tracers) > 0 { quicConf.Tracer = quiclogging.NewMultiplexedTracer(tracers...) 
diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go index b71478fd..5f219fc6 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/listener.go @@ -9,8 +9,8 @@ import ( "net" "sync" - "github.com/lucas-clemente/quic-go" ma "github.com/multiformats/go-multiaddr" + "github.com/quic-go/quic-go" ) var quicListen = quic.Listen // so we can mock it in tests diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go index afd8fbb7..12eb7d8a 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/quic_multiaddr.go @@ -4,9 +4,9 @@ import ( "errors" "net" - "github.com/lucas-clemente/quic-go" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + "github.com/quic-go/quic-go" ) var ( diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go index 4cb46f23..684a7e0b 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/reuse.go @@ -110,7 +110,8 @@ func (r *reuse) gc() { select { case <-r.closeChan: return - case now := <-ticker.C: + case <-ticker.C: + now := time.Now() r.mutex.Lock() for key, conn := range r.global { if conn.ShouldGarbageCollect(now) { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go index f7a8767a..46a683cb 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go +++ 
b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer.go @@ -9,8 +9,8 @@ import ( golog "github.com/ipfs/go-log/v2" "github.com/klauspost/compress/zstd" - "github.com/lucas-clemente/quic-go/logging" - "github.com/lucas-clemente/quic-go/qlog" + "github.com/quic-go/quic-go/logging" + "github.com/quic-go/quic-go/qlog" ) var log = golog.Logger("quic-utils") diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go index 3282b747..03e73fd2 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/quicreuse/tracer_metrics.go @@ -9,9 +9,8 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" - - "github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/logging" ) var ( @@ -85,7 +84,9 @@ func (c *aggregatingCollector) RemoveConn(id string) { var collector *aggregatingCollector -func init() { +var initMetricsOnce sync.Once + +func initMetrics() { const ( direction = "direction" encLevel = "encryption_level" @@ -173,6 +174,11 @@ type metricsTracer struct { var _ logging.Tracer = &metricsTracer{} +func newMetricsTracer() *metricsTracer { + initMetricsOnce.Do(func() { initMetrics() }) + return &metricsTracer{} +} + func (m *metricsTracer) TracerForConnection(_ context.Context, p logging.Perspective, connID logging.ConnectionID) logging.ConnectionTracer { return &metricsConnTracer{perspective: p, connID: connID} } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go index 0ae24801..fc2add49 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/metrics.go @@ -26,7 +26,9 @@ const collectFrequency = 10 * 
time.Second var collector *aggregatingCollector -func init() { +var initMetricsOnce sync.Once + +func initMetrics() { segsSentDesc = prometheus.NewDesc("tcp_sent_segments_total", "TCP segments sent", nil, nil) segsRcvdDesc = prometheus.NewDesc("tcp_rcvd_segments_total", "TCP segments received", nil, nil) bytesSentDesc = prometheus.NewDesc("tcp_sent_bytes", "TCP bytes sent", nil, nil) @@ -210,6 +212,7 @@ type tracingConn struct { } func newTracingConn(c manet.Conn, isClient bool) (*tracingConn, error) { + initMetricsOnce.Do(func() { initMetrics() }) conn, err := tcp.NewConn(c) if err != nil { return nil, err diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go index b41fe7bf..f277b3f8 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/tcp/tcp.go @@ -181,14 +181,22 @@ func (t *TcpTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) log.Debugw("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err) return nil, err } + + c, err := t.dialWithScope(ctx, raddr, p, connScope) + if err != nil { + connScope.Done() + return nil, err + } + return c, nil +} + +func (t *TcpTransport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) { if err := connScope.SetPeer(p); err != nil { log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err) - connScope.Done() return nil, err } conn, err := t.maDial(ctx, raddr) if err != nil { - connScope.Done() return nil, err } // Set linger to 0 so we never get stuck in the TIME-WAIT state. 
When @@ -201,7 +209,6 @@ func (t *TcpTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) var err error c, err = newTracingConn(conn, true) if err != nil { - connScope.Done() return nil, err } } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go index 5fea8567..8dc24d38 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/addrs.go @@ -10,52 +10,28 @@ import ( manet "github.com/multiformats/go-multiaddr/net" ) -// Addr is an implementation of net.Addr for WebSocket. -type Addr struct { +// addrWrapper is an implementation of net.Addr for WebSocket. +type addrWrapper struct { *url.URL } -var _ net.Addr = (*Addr)(nil) +var _ net.Addr = addrWrapper{} // Network returns the network type for a WebSocket, "websocket". -func (addr *Addr) Network() string { +func (a addrWrapper) Network() string { return "websocket" } -// NewAddr creates an Addr with `ws` scheme (insecure). -// -// Deprecated. Use NewAddrWithScheme. -func NewAddr(host string) *Addr { - // Older versions of the transport only supported insecure connections (i.e. - // WS instead of WSS). Assume that is the case here. - return NewAddrWithScheme(host, false) -} - -// NewAddrWithScheme creates a new Addr using the given host string. isSecure -// should be true for WSS connections and false for WS. 
-func NewAddrWithScheme(host string, isSecure bool) *Addr { - scheme := "ws" - if isSecure { - scheme = "wss" - } - return &Addr{ - URL: &url.URL{ - Scheme: scheme, - Host: host, - }, - } -} - func ConvertWebsocketMultiaddrToNetAddr(maddr ma.Multiaddr) (net.Addr, error) { url, err := parseMultiaddr(maddr) if err != nil { return nil, err } - return &Addr{URL: url}, nil + return addrWrapper{URL: url}, nil } func ParseWebsocketNetAddr(a net.Addr) (ma.Multiaddr, error) { - wsa, ok := a.(*Addr) + wsa, ok := a.(addrWrapper) if !ok { return nil, fmt.Errorf("not a websocket address") } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go index 30b70055..c3918c38 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/conn.go @@ -1,156 +1,44 @@ package websocket import ( - "io" "net" - "sync" - "time" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/transport" - - ws "github.com/gorilla/websocket" ) -// GracefulCloseTimeout is the time to wait trying to gracefully close a -// connection before simply cutting it. -var GracefulCloseTimeout = 100 * time.Millisecond - -// Conn implements net.Conn interface for gorilla/websocket. -type Conn struct { - *ws.Conn - secure bool - DefaultMessageType int - reader io.Reader - closeOnce sync.Once - - readLock, writeLock sync.Mutex -} - -var _ net.Conn = (*Conn)(nil) - -// NewConn creates a Conn given a regular gorilla/websocket Conn. 
-func NewConn(raw *ws.Conn, secure bool) *Conn { - return &Conn{ - Conn: raw, - secure: secure, - DefaultMessageType: ws.BinaryMessage, - } -} - -func (c *Conn) Read(b []byte) (int, error) { - c.readLock.Lock() - defer c.readLock.Unlock() - - if c.reader == nil { - if err := c.prepNextReader(); err != nil { - return 0, err - } - } - - for { - n, err := c.reader.Read(b) - switch err { - case io.EOF: - c.reader = nil - - if n > 0 { - return n, nil - } - - if err := c.prepNextReader(); err != nil { - return 0, err - } - - // explicitly looping - default: - return n, err - } - } -} - -func (c *Conn) prepNextReader() error { - t, r, err := c.Conn.NextReader() - if err != nil { - if wserr, ok := err.(*ws.CloseError); ok { - if wserr.Code == 1000 || wserr.Code == 1005 { - return io.EOF - } - } - return err - } +const maxReadAttempts = 5 - if t == ws.CloseMessage { - return io.EOF - } - - c.reader = r - return nil +type conn struct { + net.Conn + readAttempts uint8 + localAddr addrWrapper + remoteAddr addrWrapper } -func (c *Conn) Write(b []byte) (n int, err error) { - c.writeLock.Lock() - defer c.writeLock.Unlock() +var _ net.Conn = conn{} - if err := c.Conn.WriteMessage(c.DefaultMessageType, b); err != nil { - return 0, err - } - - return len(b), nil -} - -// Close closes the connection. Only the first call to Close will receive the -// close error, subsequent and concurrent calls will return nil. -// This method is thread-safe. 
-func (c *Conn) Close() error { - var err error - c.closeOnce.Do(func() { - err1 := c.Conn.WriteControl( - ws.CloseMessage, - ws.FormatCloseMessage(ws.CloseNormalClosure, "closed"), - time.Now().Add(GracefulCloseTimeout), - ) - err2 := c.Conn.Close() - switch { - case err1 != nil: - err = err1 - case err2 != nil: - err = err2 - } - }) - return err +func (c conn) LocalAddr() net.Addr { + return c.localAddr } -func (c *Conn) LocalAddr() net.Addr { - return NewAddrWithScheme(c.Conn.LocalAddr().String(), c.secure) +func (c conn) RemoteAddr() net.Addr { + return c.remoteAddr } -func (c *Conn) RemoteAddr() net.Addr { - return NewAddrWithScheme(c.Conn.RemoteAddr().String(), c.secure) -} - -func (c *Conn) SetDeadline(t time.Time) error { - if err := c.SetReadDeadline(t); err != nil { - return err +func (c conn) Read(b []byte) (int, error) { + n, err := c.Conn.Read(b) + if err == nil && n == 0 && c.readAttempts < maxReadAttempts { + c.readAttempts++ + // Nothing happened, let's read again. We reached the end of the frame + // (https://github.com/nhooyr/websocket/blob/master/netconn.go#L118). + // The next read will block until we get + // the next frame. We limit here to avoid looping in case of a bunch of + // empty frames. Would be better if the websocket library did not + // return 0, nil here (see https://github.com/nhooyr/websocket/issues/367). But until, then this is our workaround. + return c.Read(b) } - - return c.SetWriteDeadline(t) -} - -func (c *Conn) SetReadDeadline(t time.Time) error { - // Don't lock when setting the read deadline. That would prevent us from - // interrupting an in-progress read. - return c.Conn.SetReadDeadline(t) -} - -func (c *Conn) SetWriteDeadline(t time.Time) error { - // Unlike the read deadline, we need to lock when setting the write - // deadline. 
- - c.writeLock.Lock() - defer c.writeLock.Unlock() - - return c.Conn.SetWriteDeadline(t) + return n, err } type capableConn struct { diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go index 5e7ed098..ab9a73f8 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/listener.go @@ -1,25 +1,33 @@ package websocket import ( + "context" "crypto/tls" "fmt" + "math" "net" "net/http" + "net/url" "github.com/libp2p/go-libp2p/core/transport" ma "github.com/multiformats/go-multiaddr" manet "github.com/multiformats/go-multiaddr/net" + + ws "nhooyr.io/websocket" ) type listener struct { nl net.Listener server http.Server + // The Go standard library sets the http.Server.TLSConfig no matter if this is a WS or WSS, + // so we can't rely on checking if server.TLSConfig is set. + isWss bool laddr ma.Multiaddr closed chan struct{} - incoming chan *Conn + incoming chan net.Conn } func (pwma *parsedWebsocketMultiaddr) toMultiaddr() ma.Multiaddr { @@ -71,11 +79,12 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) { ln := &listener{ nl: nl, laddr: parsed.toMultiaddr(), - incoming: make(chan *Conn), + incoming: make(chan net.Conn), closed: make(chan struct{}), } ln.server = http.Server{Handler: ln} if parsed.isWSS { + ln.isWss = true ln.server.TLSConfig = tlsConf } return ln, nil @@ -83,7 +92,7 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config) (*listener, error) { func (l *listener) serve() { defer close(l.closed) - if l.server.TLSConfig == nil { + if !l.isWss { l.server.Serve(l.nl) } else { l.server.ServeTLS(l.nl, "", "") @@ -91,16 +100,38 @@ func (l *listener) serve() { } func (l *listener) ServeHTTP(w http.ResponseWriter, r *http.Request) { - c, err := upgrader.Upgrade(w, r, nil) + scheme := "ws" + if l.isWss { + scheme = "wss" + } + + c, err := 
ws.Accept(w, r, &ws.AcceptOptions{ + // Allow requests from *all* origins. + InsecureSkipVerify: true, + }) if err != nil { // The upgrader writes a response for us. return } + // Set an arbitrarily large read limit since we don't actually want to limit the message size here. + // See https://github.com/nhooyr/websocket/issues/382 for details. + c.SetReadLimit(math.MaxInt64 - 1) // -1 because the library adds a byte for the fin frame + select { - case l.incoming <- NewConn(c, false): + case l.incoming <- conn{ + Conn: ws.NetConn(context.Background(), c, ws.MessageBinary), + localAddr: addrWrapper{&url.URL{ + Host: r.Context().Value(http.LocalAddrContextKey).(net.Addr).String(), + Scheme: scheme, + }}, + remoteAddr: addrWrapper{&url.URL{ + Host: r.RemoteAddr, + Scheme: scheme, + }}, + }: case <-l.closed: - c.Close() + c.Close(ws.StatusNormalClosure, "closed") } // The connection has been hijacked, it's safe to return. } diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go index 03941013..a78add97 100644 --- a/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/websocket/websocket.go @@ -4,8 +4,11 @@ package websocket import ( "context" "crypto/tls" + "fmt" + "math" "net" "net/http" + "net/url" "time" "github.com/libp2p/go-libp2p/core/network" @@ -16,7 +19,7 @@ import ( mafmt "github.com/multiformats/go-multiaddr-fmt" manet "github.com/multiformats/go-multiaddr/net" - ws "github.com/gorilla/websocket" + ws "nhooyr.io/websocket" ) // WsFmt is multiaddr formatter for WsProtocol @@ -50,14 +53,6 @@ func init() { manet.RegisterToNetAddr(ConvertWebsocketMultiaddrToNetAddr, "wss") } -// Default gorilla upgrader -var upgrader = ws.Upgrader{ - // Allow requests from *all* origins. 
- CheckOrigin: func(r *http.Request) bool { - return true - }, -} - type Option func(*WebsocketTransport) error // WithTLSClientConfig sets a TLS client configuration on the WebSocket Dialer. Only @@ -161,11 +156,19 @@ func (t *WebsocketTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p pee if err != nil { return nil, err } - macon, err := t.maDial(ctx, raddr) + c, err := t.dialWithScope(ctx, raddr, p, connScope) if err != nil { connScope.Done() return nil, err } + return c, nil +} + +func (t *WebsocketTransport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, connScope network.ConnManagementScope) (transport.CapableConn, error) { + macon, err := t.maDial(ctx, raddr) + if err != nil { + return nil, err + } conn, err := t.upgrader.Upgrade(ctx, t, macon, network.DirOutbound, p, connScope) if err != nil { return nil, err @@ -179,7 +182,26 @@ func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (ma return nil, err } isWss := wsurl.Scheme == "wss" - dialer := ws.Dialer{HandshakeTimeout: 30 * time.Second} + wsurlCopy := *wsurl + remoteAddr := addrWrapper{URL: &wsurlCopy} + localAddrChan := make(chan addrWrapper, 1) + + transport := &http.Transport{ + DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := net.Dial(network, addr) + if err != nil { + close(localAddrChan) + return nil, err + } + localAddrChan <- addrWrapper{URL: &url.URL{Host: conn.LocalAddr().String(), Scheme: wsurl.Scheme}} + return conn, nil + }, + } + dialer := http.Client{ + Timeout: 30 * time.Second, + Transport: transport, + } + if isWss { sni := "" sni, err = raddr.ValueForProtocol(ma.P_SNI) @@ -190,31 +212,55 @@ func (t *WebsocketTransport) maDial(ctx context.Context, raddr ma.Multiaddr) (ma if sni != "" { copytlsClientConf := t.tlsClientConf.Clone() copytlsClientConf.ServerName = sni - dialer.TLSClientConfig = copytlsClientConf + transport.TLSClientConfig = copytlsClientConf ipAddr := wsurl.Host - // Setting 
the NetDial because we already have the resolved IP address, so we don't want to do another resolution. + // Setting the Dial because we already have the resolved IP address, so we don't want to do another resolution. // We set the `.Host` to the sni field so that the host header gets properly set. - dialer.NetDial = func(network, address string) (net.Conn, error) { + transport.DialContext = func(ctx context.Context, network, address string) (net.Conn, error) { tcpAddr, err := net.ResolveTCPAddr(network, ipAddr) if err != nil { + close(localAddrChan) + return nil, err + } + conn, err := net.DialTCP("tcp", nil, tcpAddr) + if err != nil { + close(localAddrChan) return nil, err } - return net.DialTCP("tcp", nil, tcpAddr) + localAddrChan <- addrWrapper{URL: &url.URL{Host: conn.LocalAddr().String(), Scheme: wsurl.Scheme}} + return conn, nil } wsurl.Host = sni + ":" + wsurl.Port() } else { - dialer.TLSClientConfig = t.tlsClientConf + transport.TLSClientConfig = t.tlsClientConf } } - wscon, _, err := dialer.DialContext(ctx, wsurl.String(), nil) + wscon, _, err := ws.Dial(ctx, wsurl.String(), &ws.DialOptions{ + HTTPClient: &dialer, + }) if err != nil { return nil, err } - mnc, err := manet.WrapNetConn(NewConn(wscon, isWss)) + // We need the local address of this connection, and afaict there's no other + // way of getting it besides hooking into the dial context func. + localAdddr, ok := <-localAddrChan + if !ok { + wscon.Close(ws.StatusNormalClosure, "closed. no local address") + return nil, fmt.Errorf("failed to get local address") + } + + // Set an arbitrarily large read limit since we don't actually want to limit the message size here. + wscon.SetReadLimit(math.MaxInt64 - 1) // -1 because the library adds a byte for the fin frame + mnc, err := manet.WrapNetConn( + conn{ + Conn: ws.NetConn(context.Background(), wscon, ws.MessageBinary), + localAddr: localAdddr, + remoteAddr: remoteAddr, + }) if err != nil { - wscon.Close() + wscon.Close(ws.StatusNormalClosure, "closed. 
err") return nil, err } return mnc, nil diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go new file mode 100644 index 00000000..d48a0aa5 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/cert_manager.go @@ -0,0 +1,213 @@ +package libp2pwebtransport + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "encoding/binary" + "fmt" + "sync" + "time" + + "github.com/benbjohnson/clock" + ic "github.com/libp2p/go-libp2p/core/crypto" + ma "github.com/multiformats/go-multiaddr" + "github.com/multiformats/go-multihash" +) + +// Allow for a bit of clock skew. +// When we generate a certificate, the NotBefore time is set to clockSkewAllowance before the current time. +// Similarly, we stop using a certificate one clockSkewAllowance before its expiry time. +const clockSkewAllowance = time.Hour +const validityMinusTwoSkew = certValidity - (2 * clockSkewAllowance) + +type certConfig struct { + tlsConf *tls.Config + sha256 [32]byte // cached from the tlsConf +} + +func (c *certConfig) Start() time.Time { return c.tlsConf.Certificates[0].Leaf.NotBefore } +func (c *certConfig) End() time.Time { return c.tlsConf.Certificates[0].Leaf.NotAfter } + +func newCertConfig(key ic.PrivKey, start, end time.Time) (*certConfig, error) { + conf, err := getTLSConf(key, start, end) + if err != nil { + return nil, err + } + return &certConfig{ + tlsConf: conf, + sha256: sha256.Sum256(conf.Certificates[0].Leaf.Raw), + }, nil +} + +// Certificate renewal logic: +// 1. On startup, we generate one cert that is valid from now (-1h, to allow for clock skew), and another +// cert that is valid from the expiry date of the first certificate (again, with allowance for clock skew). +// 2. Once we reach 1h before expiry of the first certificate, we switch over to the second certificate. 
+// At the same time, we stop advertising the certhash of the first cert and generate the next cert. +type certManager struct { + clock clock.Clock + ctx context.Context + ctxCancel context.CancelFunc + refCount sync.WaitGroup + + mx sync.RWMutex + lastConfig *certConfig // initially nil + currentConfig *certConfig + nextConfig *certConfig // nil until we have passed half the certValidity of the current config + addrComp ma.Multiaddr + + serializedCertHashes [][]byte +} + +func newCertManager(hostKey ic.PrivKey, clock clock.Clock) (*certManager, error) { + m := &certManager{clock: clock} + m.ctx, m.ctxCancel = context.WithCancel(context.Background()) + if err := m.init(hostKey); err != nil { + return nil, err + } + + m.background(hostKey) + return m, nil +} + +// getCurrentTimeBucket returns the canonical start time of the given time as +// bucketed by ranges of certValidity since unix epoch (plus an offset). This +// lets you get the same time ranges across reboots without having to persist +// state. +// ``` +// ... v--- epoch + offset +// ... |--------| |--------| ... +// ... |--------| |--------| ... +// ``` +func getCurrentBucketStartTime(now time.Time, offset time.Duration) time.Time { + currentBucket := (now.UnixMilli() - offset.Milliseconds()) / validityMinusTwoSkew.Milliseconds() + return time.UnixMilli(offset.Milliseconds() + currentBucket*validityMinusTwoSkew.Milliseconds()) +} + +func (m *certManager) init(hostKey ic.PrivKey) error { + start := m.clock.Now() + pubkeyBytes, err := hostKey.GetPublic().Raw() + if err != nil { + return err + } + + // We want to add a random offset to each start time so that not all certs + // rotate at the same time across the network. The offset represents moving + // the bucket start time some `offset` earlier. 
+ offset := (time.Duration(binary.LittleEndian.Uint16(pubkeyBytes)) * time.Minute) % certValidity + + // We want the certificate have been valid for at least one clockSkewAllowance + start = start.Add(-clockSkewAllowance) + startTime := getCurrentBucketStartTime(start, offset) + m.nextConfig, err = newCertConfig(hostKey, startTime, startTime.Add(certValidity)) + if err != nil { + return err + } + return m.rollConfig(hostKey) +} + +func (m *certManager) rollConfig(hostKey ic.PrivKey) error { + // We stop using the current certificate clockSkewAllowance before its expiry time. + // At this point, the next certificate needs to be valid for one clockSkewAllowance. + nextStart := m.nextConfig.End().Add(-2 * clockSkewAllowance) + c, err := newCertConfig(hostKey, nextStart, nextStart.Add(certValidity)) + if err != nil { + return err + } + m.lastConfig = m.currentConfig + m.currentConfig = m.nextConfig + m.nextConfig = c + if err := m.cacheSerializedCertHashes(); err != nil { + return err + } + return m.cacheAddrComponent() +} + +func (m *certManager) background(hostKey ic.PrivKey) { + d := m.currentConfig.End().Add(-clockSkewAllowance).Sub(m.clock.Now()) + log.Debugw("setting timer", "duration", d.String()) + t := m.clock.Timer(d) + m.refCount.Add(1) + + go func() { + defer m.refCount.Done() + defer t.Stop() + + for { + select { + case <-m.ctx.Done(): + return + case <-t.C: + now := m.clock.Now() + m.mx.Lock() + if err := m.rollConfig(hostKey); err != nil { + log.Errorw("rolling config failed", "error", err) + } + d := m.currentConfig.End().Add(-clockSkewAllowance).Sub(now) + log.Debugw("rolling certificates", "next", d.String()) + t.Reset(d) + m.mx.Unlock() + } + } + }() +} + +func (m *certManager) GetConfig() *tls.Config { + m.mx.RLock() + defer m.mx.RUnlock() + return m.currentConfig.tlsConf +} + +func (m *certManager) AddrComponent() ma.Multiaddr { + m.mx.RLock() + defer m.mx.RUnlock() + return m.addrComp +} + +func (m *certManager) SerializedCertHashes() [][]byte { + 
return m.serializedCertHashes +} + +func (m *certManager) cacheSerializedCertHashes() error { + hashes := make([][32]byte, 0, 3) + if m.lastConfig != nil { + hashes = append(hashes, m.lastConfig.sha256) + } + hashes = append(hashes, m.currentConfig.sha256) + if m.nextConfig != nil { + hashes = append(hashes, m.nextConfig.sha256) + } + + m.serializedCertHashes = m.serializedCertHashes[:0] + for _, certHash := range hashes { + h, err := multihash.Encode(certHash[:], multihash.SHA2_256) + if err != nil { + return fmt.Errorf("failed to encode certificate hash: %w", err) + } + m.serializedCertHashes = append(m.serializedCertHashes, h) + } + return nil +} + +func (m *certManager) cacheAddrComponent() error { + addr, err := addrComponentForCert(m.currentConfig.sha256[:]) + if err != nil { + return err + } + if m.nextConfig != nil { + comp, err := addrComponentForCert(m.nextConfig.sha256[:]) + if err != nil { + return err + } + addr = addr.Encapsulate(comp) + } + m.addrComp = addr + return nil +} + +func (m *certManager) Close() error { + m.ctxCancel() + m.refCount.Wait() + return nil +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go new file mode 100644 index 00000000..5815d9b6 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/conn.go @@ -0,0 +1,81 @@ +package libp2pwebtransport + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/network" + tpt "github.com/libp2p/go-libp2p/core/transport" + + ma "github.com/multiformats/go-multiaddr" + "github.com/quic-go/webtransport-go" +) + +type connSecurityMultiaddrs struct { + network.ConnSecurity + network.ConnMultiaddrs +} + +type connMultiaddrs struct { + local, remote ma.Multiaddr +} + +var _ network.ConnMultiaddrs = &connMultiaddrs{} + +func (c *connMultiaddrs) LocalMultiaddr() ma.Multiaddr { return c.local } +func (c *connMultiaddrs) RemoteMultiaddr() ma.Multiaddr { return 
c.remote } + +type conn struct { + *connSecurityMultiaddrs + + transport *transport + session *webtransport.Session + + scope network.ConnScope +} + +var _ tpt.CapableConn = &conn{} + +func newConn(tr *transport, sess *webtransport.Session, sconn *connSecurityMultiaddrs, scope network.ConnScope) *conn { + return &conn{ + connSecurityMultiaddrs: sconn, + transport: tr, + session: sess, + scope: scope, + } +} + +func (c *conn) OpenStream(ctx context.Context) (network.MuxedStream, error) { + str, err := c.session.OpenStreamSync(ctx) + if err != nil { + return nil, err + } + return &stream{str}, nil +} + +func (c *conn) AcceptStream() (network.MuxedStream, error) { + str, err := c.session.AcceptStream(context.Background()) + if err != nil { + return nil, err + } + return &stream{str}, nil +} + +func (c *conn) allowWindowIncrease(size uint64) bool { + return c.scope.ReserveMemory(int(size), network.ReservationPriorityMedium) == nil +} + +// Close closes the connection. +// It must be called even if the peer closed the connection in order for +// garbage collection to properly work in this package. 
+func (c *conn) Close() error { + c.transport.removeConn(c.session) + return c.session.CloseWithError(0, "") +} + +func (c *conn) IsClosed() bool { return c.session.Context().Err() != nil } +func (c *conn) Scope() network.ConnScope { return c.scope } +func (c *conn) Transport() tpt.Transport { return c.transport } + +func (c *conn) ConnState() network.ConnectionState { + return network.ConnectionState{Transport: "webtransport"} +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go new file mode 100644 index 00000000..bdc121c5 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/crypto.go @@ -0,0 +1,155 @@ +package libp2pwebtransport + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/sha256" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "time" + + "golang.org/x/crypto/hkdf" + + ic "github.com/libp2p/go-libp2p/core/crypto" + + "github.com/multiformats/go-multihash" + "github.com/quic-go/quic-go/http3" +) + +const deterministicCertInfo = "determinisitic cert" + +func getTLSConf(key ic.PrivKey, start, end time.Time) (*tls.Config, error) { + cert, priv, err := generateCert(key, start, end) + if err != nil { + return nil, err + } + return &tls.Config{ + Certificates: []tls.Certificate{{ + Certificate: [][]byte{cert.Raw}, + PrivateKey: priv, + Leaf: cert, + }}, + NextProtos: []string{http3.NextProtoH3}, + }, nil +} + +// generateCert generates certs deterministically based on the `key` and start +// time passed in. Uses `golang.org/x/crypto/hkdf`. 
+func generateCert(key ic.PrivKey, start, end time.Time) (*x509.Certificate, *ecdsa.PrivateKey, error) { + keyBytes, err := key.Raw() + if err != nil { + return nil, nil, err + } + + startTimeSalt := make([]byte, 8) + binary.LittleEndian.PutUint64(startTimeSalt, uint64(start.UnixNano())) + deterministicHKDFReader := newDeterministicReader(keyBytes, startTimeSalt, deterministicCertInfo) + + b := make([]byte, 8) + if _, err := deterministicHKDFReader.Read(b); err != nil { + return nil, nil, err + } + serial := int64(binary.BigEndian.Uint64(b)) + if serial < 0 { + serial = -serial + } + certTempl := &x509.Certificate{ + SerialNumber: big.NewInt(serial), + Subject: pkix.Name{}, + NotBefore: start, + NotAfter: end, + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + + caPrivateKey, err := ecdsa.GenerateKey(elliptic.P256(), deterministicHKDFReader) + if err != nil { + return nil, nil, err + } + caBytes, err := x509.CreateCertificate(deterministicHKDFReader, certTempl, certTempl, caPrivateKey.Public(), caPrivateKey) + if err != nil { + return nil, nil, err + } + ca, err := x509.ParseCertificate(caBytes) + if err != nil { + return nil, nil, err + } + return ca, caPrivateKey, nil +} + +func verifyRawCerts(rawCerts [][]byte, certHashes []multihash.DecodedMultihash) error { + if len(rawCerts) < 1 { + return errors.New("no cert") + } + leaf := rawCerts[len(rawCerts)-1] + // The W3C WebTransport specification currently only allows SHA-256 certificates for serverCertificateHashes. 
+ hash := sha256.Sum256(leaf) + var verified bool + for _, h := range certHashes { + if h.Code == multihash.SHA2_256 && bytes.Equal(h.Digest, hash[:]) { + verified = true + break + } + } + if !verified { + digests := make([][]byte, 0, len(certHashes)) + for _, h := range certHashes { + digests = append(digests, h.Digest) + } + return fmt.Errorf("cert hash not found: %#x (expected: %#x)", hash, digests) + } + + cert, err := x509.ParseCertificate(leaf) + if err != nil { + return err + } + // TODO: is this the best (and complete?) way to identify RSA certificates? + switch cert.SignatureAlgorithm { + case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.MD2WithRSA, x509.MD5WithRSA: + return errors.New("cert uses RSA") + } + if l := cert.NotAfter.Sub(cert.NotBefore); l > 14*24*time.Hour { + return fmt.Errorf("cert must not be valid for longer than 14 days (NotBefore: %s, NotAfter: %s, Length: %s)", cert.NotBefore, cert.NotAfter, l) + } + now := time.Now() + if now.Before(cert.NotBefore) || now.After(cert.NotAfter) { + return fmt.Errorf("cert not valid (NotBefore: %s, NotAfter: %s)", cert.NotBefore, cert.NotAfter) + } + return nil +} + +// deterministicReader is a hack. It counter-acts the Go library's attempt at +// making ECDSA signatures non-deterministic. Go adds non-determinism by +// randomly dropping a singly byte from the reader stream. This counteracts this +// by detecting when a read is a single byte and using a different reader +// instead. 
+type deterministicReader struct { + reader io.Reader + singleByteReader io.Reader +} + +func newDeterministicReader(seed []byte, salt []byte, info string) io.Reader { + reader := hkdf.New(sha256.New, seed, salt, []byte(info)) + singleByteReader := hkdf.New(sha256.New, seed, salt, []byte(info+" single byte")) + + return &deterministicReader{ + reader: reader, + singleByteReader: singleByteReader, + } +} + +func (r *deterministicReader) Read(p []byte) (n int, err error) { + if len(p) == 1 { + return r.singleByteReader.Read(p) + } + return r.reader.Read(p) +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go new file mode 100644 index 00000000..4722e008 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/listener.go @@ -0,0 +1,218 @@ +package libp2pwebtransport + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "time" + + "github.com/libp2p/go-libp2p/core/network" + tpt "github.com/libp2p/go-libp2p/core/transport" + "github.com/libp2p/go-libp2p/p2p/security/noise" + "github.com/libp2p/go-libp2p/p2p/security/noise/pb" + "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + + ma "github.com/multiformats/go-multiaddr" + "github.com/quic-go/webtransport-go" +) + +var errClosed = errors.New("closed") + +const queueLen = 16 +const handshakeTimeout = 10 * time.Second + +type listener struct { + transport *transport + isStaticTLSConf bool + reuseListener quicreuse.Listener + + server webtransport.Server + + ctx context.Context + ctxCancel context.CancelFunc + + serverClosed chan struct{} // is closed when server.Serve returns + + addr net.Addr + multiaddr ma.Multiaddr + + queue chan tpt.CapableConn +} + +var _ tpt.Listener = &listener{} + +func newListener(reuseListener quicreuse.Listener, t *transport, isStaticTLSConf bool) (tpt.Listener, error) { + localMultiaddr, err := toWebtransportMultiaddr(reuseListener.Addr()) 
+ if err != nil { + return nil, err + } + + ln := &listener{ + reuseListener: reuseListener, + transport: t, + isStaticTLSConf: isStaticTLSConf, + queue: make(chan tpt.CapableConn, queueLen), + serverClosed: make(chan struct{}), + addr: reuseListener.Addr(), + multiaddr: localMultiaddr, + server: webtransport.Server{ + CheckOrigin: func(r *http.Request) bool { return true }, + }, + } + ln.ctx, ln.ctxCancel = context.WithCancel(context.Background()) + mux := http.NewServeMux() + mux.HandleFunc(webtransportHTTPEndpoint, ln.httpHandler) + ln.server.H3.Handler = mux + go func() { + defer close(ln.serverClosed) + for { + conn, err := ln.reuseListener.Accept(context.Background()) + if err != nil { + log.Debugw("serving failed", "addr", ln.Addr(), "error", err) + return + } + go ln.server.ServeQUICConn(conn) + } + }() + return ln, nil +} + +func (l *listener) httpHandler(w http.ResponseWriter, r *http.Request) { + typ, ok := r.URL.Query()["type"] + if !ok || len(typ) != 1 || typ[0] != "noise" { + w.WriteHeader(http.StatusBadRequest) + return + } + remoteMultiaddr, err := stringToWebtransportMultiaddr(r.RemoteAddr) + if err != nil { + // This should never happen. 
+ log.Errorw("converting remote address failed", "remote", r.RemoteAddr, "error", err) + w.WriteHeader(http.StatusBadRequest) + return + } + if l.transport.gater != nil && !l.transport.gater.InterceptAccept(&connMultiaddrs{local: l.multiaddr, remote: remoteMultiaddr}) { + w.WriteHeader(http.StatusForbidden) + return + } + + connScope, err := l.transport.rcmgr.OpenConnection(network.DirInbound, false, remoteMultiaddr) + if err != nil { + log.Debugw("resource manager blocked incoming connection", "addr", r.RemoteAddr, "error", err) + w.WriteHeader(http.StatusServiceUnavailable) + return + } + err = l.httpHandlerWithConnScope(w, r, connScope) + if err != nil { + connScope.Done() + } +} + +func (l *listener) httpHandlerWithConnScope(w http.ResponseWriter, r *http.Request, connScope network.ConnManagementScope) error { + sess, err := l.server.Upgrade(w, r) + if err != nil { + log.Debugw("upgrade failed", "error", err) + // TODO: think about the status code to use here + w.WriteHeader(500) + return err + } + ctx, cancel := context.WithTimeout(l.ctx, handshakeTimeout) + sconn, err := l.handshake(ctx, sess) + if err != nil { + cancel() + log.Debugw("handshake failed", "error", err) + sess.CloseWithError(1, "") + return err + } + cancel() + + if l.transport.gater != nil && !l.transport.gater.InterceptSecured(network.DirInbound, sconn.RemotePeer(), sconn) { + // TODO: can we close with a specific error here? 
+ sess.CloseWithError(errorCodeConnectionGating, "") + return errors.New("gater blocked connection") + } + + if err := connScope.SetPeer(sconn.RemotePeer()); err != nil { + log.Debugw("resource manager blocked incoming connection for peer", "peer", sconn.RemotePeer(), "addr", r.RemoteAddr, "error", err) + sess.CloseWithError(1, "") + return err + } + + conn := newConn(l.transport, sess, sconn, connScope) + l.transport.addConn(sess, conn) + select { + case l.queue <- conn: + default: + log.Debugw("accept queue full, dropping incoming connection", "peer", sconn.RemotePeer(), "addr", r.RemoteAddr, "error", err) + sess.CloseWithError(1, "") + return errors.New("accept queue full") + } + + return nil +} + +func (l *listener) Accept() (tpt.CapableConn, error) { + select { + case <-l.ctx.Done(): + return nil, errClosed + case c := <-l.queue: + return c, nil + } +} + +func (l *listener) handshake(ctx context.Context, sess *webtransport.Session) (*connSecurityMultiaddrs, error) { + local, err := toWebtransportMultiaddr(sess.LocalAddr()) + if err != nil { + return nil, fmt.Errorf("error determiniting local addr: %w", err) + } + remote, err := toWebtransportMultiaddr(sess.RemoteAddr()) + if err != nil { + return nil, fmt.Errorf("error determiniting remote addr: %w", err) + } + + str, err := sess.AcceptStream(ctx) + if err != nil { + return nil, err + } + var earlyData [][]byte + if !l.isStaticTLSConf { + earlyData = l.transport.certManager.SerializedCertHashes() + } + + n, err := l.transport.noise.WithSessionOptions(noise.EarlyData( + nil, + newEarlyDataSender(&pb.NoiseExtensions{WebtransportCerthashes: earlyData}), + )) + if err != nil { + return nil, fmt.Errorf("failed to initialize Noise session: %w", err) + } + c, err := n.SecureInbound(ctx, &webtransportStream{Stream: str, wsess: sess}, "") + if err != nil { + return nil, err + } + + return &connSecurityMultiaddrs{ + ConnSecurity: c, + ConnMultiaddrs: &connMultiaddrs{local: local, remote: remote}, + }, nil +} + +func (l 
*listener) Addr() net.Addr { + return l.addr +} + +func (l *listener) Multiaddr() ma.Multiaddr { + if l.transport.certManager == nil { + return l.multiaddr + } + return l.multiaddr.Encapsulate(l.transport.certManager.AddrComponent()) +} + +func (l *listener) Close() error { + l.ctxCancel() + l.reuseListener.Close() + err := l.server.Close() + <-l.serverClosed + return err +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go new file mode 100644 index 00000000..d6930af3 --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/multiaddr.go @@ -0,0 +1,107 @@ +package libp2pwebtransport + +import ( + "errors" + "fmt" + "net" + "strconv" + + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/multiformats/go-multibase" + "github.com/multiformats/go-multihash" +) + +var webtransportMA = ma.StringCast("/quic-v1/webtransport") + +func toWebtransportMultiaddr(na net.Addr) (ma.Multiaddr, error) { + addr, err := manet.FromNetAddr(na) + if err != nil { + return nil, err + } + if _, err := addr.ValueForProtocol(ma.P_UDP); err != nil { + return nil, errors.New("not a UDP address") + } + return addr.Encapsulate(webtransportMA), nil +} + +func stringToWebtransportMultiaddr(str string) (ma.Multiaddr, error) { + host, portStr, err := net.SplitHostPort(str) + if err != nil { + return nil, err + } + port, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + return nil, err + } + ip := net.ParseIP(host) + if ip == nil { + return nil, errors.New("failed to parse IP") + } + return toWebtransportMultiaddr(&net.UDPAddr{IP: ip, Port: int(port)}) +} + +func extractCertHashes(addr ma.Multiaddr) ([]multihash.DecodedMultihash, error) { + certHashesStr := make([]string, 0, 2) + ma.ForEach(addr, func(c ma.Component) bool { + if c.Protocol().Code == ma.P_CERTHASH { + certHashesStr = 
append(certHashesStr, c.Value()) + } + return true + }) + certHashes := make([]multihash.DecodedMultihash, 0, len(certHashesStr)) + for _, s := range certHashesStr { + _, ch, err := multibase.Decode(s) + if err != nil { + return nil, fmt.Errorf("failed to multibase-decode certificate hash: %w", err) + } + dh, err := multihash.Decode(ch) + if err != nil { + return nil, fmt.Errorf("failed to multihash-decode certificate hash: %w", err) + } + certHashes = append(certHashes, *dh) + } + return certHashes, nil +} + +func addrComponentForCert(hash []byte) (ma.Multiaddr, error) { + mh, err := multihash.Encode(hash, multihash.SHA2_256) + if err != nil { + return nil, err + } + certStr, err := multibase.Encode(multibase.Base58BTC, mh) + if err != nil { + return nil, err + } + return ma.NewComponent(ma.ProtocolWithCode(ma.P_CERTHASH).Name, certStr) +} + +// IsWebtransportMultiaddr returns true if the given multiaddr is a well formed +// webtransport multiaddr. Returns the number of certhashes found. 
+func IsWebtransportMultiaddr(multiaddr ma.Multiaddr) (bool, int) { + const ( + init = iota + foundUDP + foundQuicV1 + foundWebTransport + ) + state := init + certhashCount := 0 + + ma.ForEach(multiaddr, func(c ma.Component) bool { + if c.Protocol().Code == ma.P_QUIC_V1 && state == init { + state = foundUDP + } + if c.Protocol().Code == ma.P_QUIC_V1 && state == foundUDP { + state = foundQuicV1 + } + if c.Protocol().Code == ma.P_WEBTRANSPORT && state == foundQuicV1 { + state = foundWebTransport + } + if c.Protocol().Code == ma.P_CERTHASH && state == foundWebTransport { + certhashCount++ + } + return true + }) + return state == foundWebTransport, certhashCount +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go new file mode 100644 index 00000000..6ca8d9dd --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/noise_early_data.go @@ -0,0 +1,36 @@ +package libp2pwebtransport + +import ( + "context" + "net" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/security/noise" + "github.com/libp2p/go-libp2p/p2p/security/noise/pb" +) + +type earlyDataHandler struct { + earlyData *pb.NoiseExtensions + receive func(extensions *pb.NoiseExtensions) error +} + +var _ noise.EarlyDataHandler = &earlyDataHandler{} + +func newEarlyDataSender(earlyData *pb.NoiseExtensions) noise.EarlyDataHandler { + return &earlyDataHandler{earlyData: earlyData} +} + +func newEarlyDataReceiver(receive func(*pb.NoiseExtensions) error) noise.EarlyDataHandler { + return &earlyDataHandler{receive: receive} +} + +func (e *earlyDataHandler) Send(context.Context, net.Conn, peer.ID) *pb.NoiseExtensions { + return e.earlyData +} + +func (e *earlyDataHandler) Received(_ context.Context, _ net.Conn, ext *pb.NoiseExtensions) error { + if e.receive == nil { + return nil + } + return e.receive(ext) +} diff --git 
a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go new file mode 100644 index 00000000..0849fc9f --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/stream.go @@ -0,0 +1,71 @@ +package libp2pwebtransport + +import ( + "errors" + "net" + + "github.com/libp2p/go-libp2p/core/network" + + "github.com/quic-go/webtransport-go" +) + +const ( + reset webtransport.StreamErrorCode = 0 +) + +type webtransportStream struct { + webtransport.Stream + wsess *webtransport.Session +} + +var _ net.Conn = &webtransportStream{} + +func (s *webtransportStream) LocalAddr() net.Addr { + return s.wsess.LocalAddr() +} + +func (s *webtransportStream) RemoteAddr() net.Addr { + return s.wsess.RemoteAddr() +} + +type stream struct { + webtransport.Stream +} + +var _ network.MuxedStream = &stream{} + +func (s *stream) Read(b []byte) (n int, err error) { + n, err = s.Stream.Read(b) + if err != nil && errors.Is(err, &webtransport.StreamError{}) { + err = network.ErrReset + } + return n, err +} + +func (s *stream) Write(b []byte) (n int, err error) { + n, err = s.Stream.Write(b) + if err != nil && errors.Is(err, &webtransport.StreamError{}) { + err = network.ErrReset + } + return n, err +} + +func (s *stream) Reset() error { + s.Stream.CancelRead(reset) + s.Stream.CancelWrite(reset) + return nil +} + +func (s *stream) Close() error { + s.Stream.CancelRead(reset) + return s.Stream.Close() +} + +func (s *stream) CloseRead() error { + s.Stream.CancelRead(reset) + return nil +} + +func (s *stream) CloseWrite() error { + return s.Stream.Close() +} diff --git a/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go new file mode 100644 index 00000000..f9c68ddf --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/p2p/transport/webtransport/transport.go @@ -0,0 +1,414 @@ +package 
libp2pwebtransport + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" + + "github.com/libp2p/go-libp2p/core/connmgr" + ic "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/pnet" + tpt "github.com/libp2p/go-libp2p/core/transport" + "github.com/libp2p/go-libp2p/p2p/security/noise" + "github.com/libp2p/go-libp2p/p2p/security/noise/pb" + "github.com/libp2p/go-libp2p/p2p/transport/quicreuse" + + "github.com/benbjohnson/clock" + logging "github.com/ipfs/go-log/v2" + ma "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/multiformats/go-multihash" + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/http3" + "github.com/quic-go/webtransport-go" +) + +var log = logging.Logger("webtransport") + +const webtransportHTTPEndpoint = "/.well-known/libp2p-webtransport" + +const errorCodeConnectionGating = 0x47415445 // GATE in ASCII + +const certValidity = 14 * 24 * time.Hour + +type Option func(*transport) error + +func WithClock(cl clock.Clock) Option { + return func(t *transport) error { + t.clock = cl + return nil + } +} + +// WithTLSClientConfig sets a custom tls.Config used for dialing. +// This option is most useful for setting a custom tls.Config.RootCAs certificate pool. +// When dialing a multiaddr that contains a /certhash component, this library will set InsecureSkipVerify and +// overwrite the VerifyPeerCertificate callback. 
+func WithTLSClientConfig(c *tls.Config) Option { + return func(t *transport) error { + t.tlsClientConf = c + return nil + } +} + +type transport struct { + privKey ic.PrivKey + pid peer.ID + clock clock.Clock + + connManager *quicreuse.ConnManager + rcmgr network.ResourceManager + gater connmgr.ConnectionGater + + listenOnce sync.Once + listenOnceErr error + certManager *certManager + hasCertManager atomic.Bool // set to true once the certManager is initialized + staticTLSConf *tls.Config + tlsClientConf *tls.Config + + noise *noise.Transport + + connMx sync.Mutex + conns map[uint64]*conn // using quic-go's ConnectionTracingKey as map key +} + +var _ tpt.Transport = &transport{} +var _ tpt.Resolver = &transport{} +var _ io.Closer = &transport{} + +func New(key ic.PrivKey, psk pnet.PSK, connManager *quicreuse.ConnManager, gater connmgr.ConnectionGater, rcmgr network.ResourceManager, opts ...Option) (tpt.Transport, error) { + if len(psk) > 0 { + log.Error("WebTransport doesn't support private networks yet.") + return nil, errors.New("WebTransport doesn't support private networks yet") + } + if rcmgr == nil { + rcmgr = &network.NullResourceManager{} + } + id, err := peer.IDFromPrivateKey(key) + if err != nil { + return nil, err + } + t := &transport{ + pid: id, + privKey: key, + rcmgr: rcmgr, + gater: gater, + clock: clock.New(), + connManager: connManager, + conns: map[uint64]*conn{}, + } + for _, opt := range opts { + if err := opt(t); err != nil { + return nil, err + } + } + n, err := noise.New(noise.ID, key, nil) + if err != nil { + return nil, err + } + t.noise = n + return t, nil +} + +func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.CapableConn, error) { + scope, err := t.rcmgr.OpenConnection(network.DirOutbound, false, raddr) + if err != nil { + log.Debugw("resource manager blocked outgoing connection", "peer", p, "addr", raddr, "error", err) + return nil, err + } + + c, err := t.dialWithScope(ctx, raddr, p, scope) + if err != 
nil { + scope.Done() + return nil, err + } + + return c, nil +} + +func (t *transport) dialWithScope(ctx context.Context, raddr ma.Multiaddr, p peer.ID, scope network.ConnManagementScope) (tpt.CapableConn, error) { + _, addr, err := manet.DialArgs(raddr) + if err != nil { + return nil, err + } + url := fmt.Sprintf("https://%s%s?type=noise", addr, webtransportHTTPEndpoint) + certHashes, err := extractCertHashes(raddr) + if err != nil { + return nil, err + } + + if len(certHashes) == 0 { + return nil, errors.New("can't dial webtransport without certhashes") + } + + sni, _ := extractSNI(raddr) + + if err := scope.SetPeer(p); err != nil { + log.Debugw("resource manager blocked outgoing connection for peer", "peer", p, "addr", raddr, "error", err) + return nil, err + } + + maddr, _ := ma.SplitFunc(raddr, func(c ma.Component) bool { return c.Protocol().Code == ma.P_WEBTRANSPORT }) + sess, err := t.dial(ctx, maddr, url, sni, certHashes) + if err != nil { + return nil, err + } + sconn, err := t.upgrade(ctx, sess, p, certHashes) + if err != nil { + sess.CloseWithError(1, "") + return nil, err + } + if t.gater != nil && !t.gater.InterceptSecured(network.DirOutbound, p, sconn) { + sess.CloseWithError(errorCodeConnectionGating, "") + return nil, fmt.Errorf("secured connection gated") + } + conn := newConn(t, sess, sconn, scope) + t.addConn(sess, conn) + return conn, nil +} + +func (t *transport) dial(ctx context.Context, addr ma.Multiaddr, url, sni string, certHashes []multihash.DecodedMultihash) (*webtransport.Session, error) { + var tlsConf *tls.Config + if t.tlsClientConf != nil { + tlsConf = t.tlsClientConf.Clone() + } else { + tlsConf = &tls.Config{} + } + tlsConf.NextProtos = append(tlsConf.NextProtos, http3.NextProtoH3) + + if sni != "" { + tlsConf.ServerName = sni + } + + if len(certHashes) > 0 { + // This is not insecure. We verify the certificate ourselves. + // See https://www.w3.org/TR/webtransport/#certificate-hashes. 
+ tlsConf.InsecureSkipVerify = true + tlsConf.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error { + return verifyRawCerts(rawCerts, certHashes) + } + } + conn, err := t.connManager.DialQUIC(ctx, addr, tlsConf, t.allowWindowIncrease) + if err != nil { + return nil, err + } + dialer := webtransport.Dialer{ + RoundTripper: &http3.RoundTripper{ + Dial: func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) { + return conn.(quic.EarlyConnection), nil + }, + }, + } + rsp, sess, err := dialer.Dial(ctx, url, nil) + if err != nil { + return nil, err + } + if rsp.StatusCode < 200 || rsp.StatusCode > 299 { + return nil, fmt.Errorf("invalid response status code: %d", rsp.StatusCode) + } + return sess, err +} + +func (t *transport) upgrade(ctx context.Context, sess *webtransport.Session, p peer.ID, certHashes []multihash.DecodedMultihash) (*connSecurityMultiaddrs, error) { + local, err := toWebtransportMultiaddr(sess.LocalAddr()) + if err != nil { + return nil, fmt.Errorf("error determining local addr: %w", err) + } + remote, err := toWebtransportMultiaddr(sess.RemoteAddr()) + if err != nil { + return nil, fmt.Errorf("error determining remote addr: %w", err) + } + + str, err := sess.OpenStreamSync(ctx) + if err != nil { + return nil, err + } + + // Now run a Noise handshake (using early data) and get all the certificate hashes from the server. + // We will verify that the certhashes we used to dial is a subset of the certhashes we received from the server. 
+ var verified bool + n, err := t.noise.WithSessionOptions(noise.EarlyData(newEarlyDataReceiver(func(b *pb.NoiseExtensions) error { + decodedCertHashes, err := decodeCertHashesFromProtobuf(b.WebtransportCerthashes) + if err != nil { + return err + } + for _, sent := range certHashes { + var found bool + for _, rcvd := range decodedCertHashes { + if sent.Code == rcvd.Code && bytes.Equal(sent.Digest, rcvd.Digest) { + found = true + break + } + } + if !found { + return fmt.Errorf("missing cert hash: %v", sent) + } + } + verified = true + return nil + }), nil)) + if err != nil { + return nil, fmt.Errorf("failed to create Noise transport: %w", err) + } + c, err := n.SecureOutbound(ctx, &webtransportStream{Stream: str, wsess: sess}, p) + if err != nil { + return nil, err + } + // The Noise handshake _should_ guarantee that our verification callback is called. + // Double-check just in case. + if !verified { + return nil, errors.New("didn't verify") + } + return &connSecurityMultiaddrs{ + ConnSecurity: c, + ConnMultiaddrs: &connMultiaddrs{local: local, remote: remote}, + }, nil +} + +func decodeCertHashesFromProtobuf(b [][]byte) ([]multihash.DecodedMultihash, error) { + hashes := make([]multihash.DecodedMultihash, 0, len(b)) + for _, h := range b { + dh, err := multihash.Decode(h) + if err != nil { + return nil, fmt.Errorf("failed to decode hash: %w", err) + } + hashes = append(hashes, *dh) + } + return hashes, nil +} + +func (t *transport) CanDial(addr ma.Multiaddr) bool { + ok, _ := IsWebtransportMultiaddr(addr) + return ok +} + +func (t *transport) Listen(laddr ma.Multiaddr) (tpt.Listener, error) { + isWebTransport, _ := IsWebtransportMultiaddr(laddr) + if !isWebTransport { + return nil, fmt.Errorf("cannot listen on non-WebTransport addr: %s", laddr) + } + if t.staticTLSConf == nil { + t.listenOnce.Do(func() { + t.certManager, t.listenOnceErr = newCertManager(t.privKey, t.clock) + t.hasCertManager.Store(true) + }) + if t.listenOnceErr != nil { + return nil, 
t.listenOnceErr + } + } else { + return nil, errors.New("static TLS config not supported on WebTransport") + } + tlsConf := t.staticTLSConf.Clone() + if tlsConf == nil { + tlsConf = &tls.Config{GetConfigForClient: func(*tls.ClientHelloInfo) (*tls.Config, error) { + return t.certManager.GetConfig(), nil + }} + } + tlsConf.NextProtos = append(tlsConf.NextProtos, http3.NextProtoH3) + + ln, err := t.connManager.ListenQUIC(laddr, tlsConf, t.allowWindowIncrease) + if err != nil { + return nil, err + } + return newListener(ln, t, t.staticTLSConf != nil) +} + +func (t *transport) Protocols() []int { + return []int{ma.P_WEBTRANSPORT} +} + +func (t *transport) Proxy() bool { + return false +} + +func (t *transport) Close() error { + t.listenOnce.Do(func() {}) + if t.certManager != nil { + return t.certManager.Close() + } + return nil +} + +func (t *transport) allowWindowIncrease(conn quic.Connection, size uint64) bool { + t.connMx.Lock() + defer t.connMx.Unlock() + + c, ok := t.conns[conn.Context().Value(quic.ConnectionTracingKey).(uint64)] + if !ok { + return false + } + return c.allowWindowIncrease(size) +} + +func (t *transport) addConn(sess *webtransport.Session, c *conn) { + t.connMx.Lock() + t.conns[sess.Context().Value(quic.ConnectionTracingKey).(uint64)] = c + t.connMx.Unlock() +} + +func (t *transport) removeConn(sess *webtransport.Session) { + t.connMx.Lock() + delete(t.conns, sess.Context().Value(quic.ConnectionTracingKey).(uint64)) + t.connMx.Unlock() +} + +// extractSNI returns what the SNI should be for the given maddr. If there is an +// SNI component in the multiaddr, then it will be returned and +// foundSniComponent will be true. If there's no SNI component, but there is a +// DNS-like component, then that will be returned for the sni and +// foundSniComponent will be false (since we didn't find an actual sni component). 
+func extractSNI(maddr ma.Multiaddr) (sni string, foundSniComponent bool) { + ma.ForEach(maddr, func(c ma.Component) bool { + switch c.Protocol().Code { + case ma.P_SNI: + sni = c.Value() + foundSniComponent = true + return false + case ma.P_DNS, ma.P_DNS4, ma.P_DNS6, ma.P_DNSADDR: + sni = c.Value() + // Keep going in case we find an `sni` component + return true + } + return true + }) + return sni, foundSniComponent +} + +// Resolve implements transport.Resolver +func (t *transport) Resolve(_ context.Context, maddr ma.Multiaddr) ([]ma.Multiaddr, error) { + sni, foundSniComponent := extractSNI(maddr) + + if foundSniComponent || sni == "" { + // The multiaddr already had an sni field, we can keep using it. Or we don't have any sni like thing + return []ma.Multiaddr{maddr}, nil + } + + beforeQuicMA, afterIncludingQuicMA := ma.SplitFunc(maddr, func(c ma.Component) bool { + return c.Protocol().Code == ma.P_QUIC_V1 + }) + quicComponent, afterQuicMA := ma.SplitFirst(afterIncludingQuicMA) + sniComponent, err := ma.NewComponent(ma.ProtocolWithCode(ma.P_SNI).Name, sni) + if err != nil { + return nil, err + } + return []ma.Multiaddr{beforeQuicMA.Encapsulate(quicComponent).Encapsulate(sniComponent).Encapsulate(afterQuicMA)}, nil +} + +// AddCertHashes adds the current certificate hashes to a multiaddress. +// If called before Listen, it's a no-op. 
+func (t *transport) AddCertHashes(m ma.Multiaddr) (ma.Multiaddr, bool) { + if !t.hasCertManager.Load() { + return m, false + } + return m.Encapsulate(t.certManager.AddrComponent()), true +} diff --git a/vendor/github.com/libp2p/go-libp2p/tools.go b/vendor/github.com/libp2p/go-libp2p/tools.go new file mode 100644 index 00000000..46a8037d --- /dev/null +++ b/vendor/github.com/libp2p/go-libp2p/tools.go @@ -0,0 +1,9 @@ +//go:build tools + +package libp2p + +import ( + _ "github.com/golang/mock/mockgen" + _ "golang.org/x/tools/cmd/goimports" + _ "google.golang.org/protobuf/cmd/protoc-gen-go" +) diff --git a/vendor/github.com/libp2p/go-libp2p/version.json b/vendor/github.com/libp2p/go-libp2p/version.json index 48cf18fa..b5580261 100644 --- a/vendor/github.com/libp2p/go-libp2p/version.json +++ b/vendor/github.com/libp2p/go-libp2p/version.json @@ -1,3 +1,3 @@ { - "version": "v0.24.2" + "version": "v0.27.8" } diff --git a/vendor/github.com/libp2p/go-msgio/pbio/interfaces.go b/vendor/github.com/libp2p/go-msgio/pbio/interfaces.go new file mode 100644 index 00000000..e3dffb7b --- /dev/null +++ b/vendor/github.com/libp2p/go-msgio/pbio/interfaces.go @@ -0,0 +1,40 @@ +// Package pbio reads and writes varint-prefix protobufs, using Google's Protobuf package. 
+package pbio + +import ( + "io" + + "google.golang.org/protobuf/proto" +) + +type Writer interface { + WriteMsg(proto.Message) error +} + +type WriteCloser interface { + Writer + io.Closer +} + +type Reader interface { + ReadMsg(msg proto.Message) error +} + +type ReadCloser interface { + Reader + io.Closer +} + +func getSize(v interface{}) (int, bool) { + if sz, ok := v.(interface { + Size() (n int) + }); ok { + return sz.Size(), true + } else if sz, ok := v.(interface { + ProtoSize() (n int) + }); ok { + return sz.ProtoSize(), true + } else { + return 0, false + } +} diff --git a/vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go new file mode 100644 index 00000000..41521476 --- /dev/null +++ b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_reader.go @@ -0,0 +1,93 @@ +// Adapted from gogo/protobuf to use multiformats/go-varint for +// efficient, interoperable length-prefixing. +// +// # Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright +// +// notice, this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above +// +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +package pbio + +import ( + "bufio" + "fmt" + "io" + "os" + "runtime/debug" + + "google.golang.org/protobuf/proto" + + "github.com/multiformats/go-varint" +) + +type uvarintReader struct { + r *bufio.Reader + buf []byte + maxSize int + closer io.Closer +} + +func NewDelimitedReader(r io.Reader, maxSize int) ReadCloser { + var closer io.Closer + if c, ok := r.(io.Closer); ok { + closer = c + } + return &uvarintReader{bufio.NewReader(r), nil, maxSize, closer} +} + +func (ur *uvarintReader) ReadMsg(msg proto.Message) (err error) { + defer func() { + if rerr := recover(); rerr != nil { + fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack()) + err = fmt.Errorf("panic reading message: %s", rerr) + } + }() + + length64, err := varint.ReadUvarint(ur.r) + if err != nil { + return err + } + length := int(length64) + if length < 0 || length > ur.maxSize { + return io.ErrShortBuffer + } + if len(ur.buf) < length { + ur.buf = make([]byte, length) + } + buf := ur.buf[:length] + if _, err := io.ReadFull(ur.r, buf); err != nil { + return err + } + return proto.Unmarshal(buf, msg) +} + +func (ur *uvarintReader) Close() error { + if ur.closer != nil { + return ur.closer.Close() + } + return nil +} diff --git a/vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go new file mode 100644 index 00000000..2f18059f --- /dev/null +++ 
b/vendor/github.com/libp2p/go-msgio/pbio/uvarint_writer.go @@ -0,0 +1,103 @@ +// Adapted from gogo/protobuf to use multiformats/go-varint for +// efficient, interoperable length-prefixing. +// +// # Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// - Redistributions of source code must retain the above copyright +// +// notice, this list of conditions and the following disclaimer. +// - Redistributions in binary form must reproduce the above +// +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+package pbio + +import ( + "fmt" + "io" + "os" + "runtime/debug" + + "google.golang.org/protobuf/proto" + + "github.com/multiformats/go-varint" +) + +type uvarintWriter struct { + w io.Writer + lenBuf []byte + buffer []byte +} + +func NewDelimitedWriter(w io.Writer) WriteCloser { + return &uvarintWriter{w, make([]byte, varint.MaxLenUvarint63), nil} +} + +func (uw *uvarintWriter) WriteMsg(msg proto.Message) (err error) { + defer func() { + if rerr := recover(); rerr != nil { + fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack()) + err = fmt.Errorf("panic reading message: %s", rerr) + } + }() + + var data []byte + if m, ok := msg.(interface { + MarshalTo(data []byte) (n int, err error) + }); ok { + n, ok := getSize(m) + if ok { + if n+varint.MaxLenUvarint63 >= len(uw.buffer) { + uw.buffer = make([]byte, n+varint.MaxLenUvarint63) + } + lenOff := varint.PutUvarint(uw.buffer, uint64(n)) + _, err = m.MarshalTo(uw.buffer[lenOff:]) + if err != nil { + return err + } + _, err = uw.w.Write(uw.buffer[:lenOff+n]) + return err + } + } + + // fallback + data, err = proto.Marshal(msg) + if err != nil { + return err + } + length := uint64(len(data)) + n := varint.PutUvarint(uw.lenBuf, length) + _, err = uw.w.Write(uw.lenBuf[:n]) + if err != nil { + return err + } + _, err = uw.w.Write(data) + return err +} + +func (uw *uvarintWriter) Close() error { + if closer, ok := uw.w.(io.Closer); ok { + return closer.Close() + } + return nil +} diff --git a/vendor/github.com/libp2p/go-openssl/.gitignore b/vendor/github.com/libp2p/go-openssl/.gitignore deleted file mode 100644 index 805d350b..00000000 --- a/vendor/github.com/libp2p/go-openssl/.gitignore +++ /dev/null @@ -1 +0,0 @@ -openssl.test diff --git a/vendor/github.com/libp2p/go-openssl/AUTHORS b/vendor/github.com/libp2p/go-openssl/AUTHORS deleted file mode 100644 index a048c1ea..00000000 --- a/vendor/github.com/libp2p/go-openssl/AUTHORS +++ /dev/null @@ -1,24 +0,0 @@ -Andrew Brampton -Anton Baklanov -Carlos Martín 
Nieto -Charles Strahan -Christopher Dudley -Christopher Fredericks -Colin Misare -dequis -Gabriel Russell -Giulio -Jakob Unterwurzacher -Juuso Haavisto -kujenga -Phus Lu -Russ Egan -Ryan Hileman -Scott J. Goldman -Scott Kidder -Space Monkey, Inc -Stephen Gallagher -Viacheslav Biriukov -Zack Owens -Ramesh Rayaprolu -Paras Shah diff --git a/vendor/github.com/libp2p/go-openssl/LICENSE b/vendor/github.com/libp2p/go-openssl/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/libp2p/go-openssl/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. 
- -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/libp2p/go-openssl/README.md b/vendor/github.com/libp2p/go-openssl/README.md deleted file mode 100644 index 62ac7dcd..00000000 --- a/vendor/github.com/libp2p/go-openssl/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# OpenSSL bindings for Go - -Forked from https://github.com/spacemonkeygo/openssl (unmaintained) to add: - -1. FreeBSD support. -2. Key equality checking. -3. A function to get the size of signatures produced by a key. - ---- - -Please see http://godoc.org/github.com/libp2p/go-openssl for more info - ---- - -### License - -Copyright (C) 2017. See AUTHORS. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -### Using on macOS -1. Install [homebrew](http://brew.sh/) -2. `$ brew install openssl` or `$ brew install openssl@1.1` - -### Using on Windows -1. Install [mingw-w64](http://mingw-w64.sourceforge.net/) -2. 
Install [pkg-config-lite](http://sourceforge.net/projects/pkgconfiglite) -3. Build (or install precompiled) openssl for mingw32-w64 -4. Set __PKG\_CONFIG\_PATH__ to the directory containing openssl.pc - (i.e. c:\mingw64\mingw64\lib\pkgconfig) diff --git a/vendor/github.com/libp2p/go-openssl/alloc.go b/vendor/github.com/libp2p/go-openssl/alloc.go deleted file mode 100644 index 25d064a2..00000000 --- a/vendor/github.com/libp2p/go-openssl/alloc.go +++ /dev/null @@ -1,19 +0,0 @@ -package openssl - -// #include "shim.h" -import "C" - -import ( - "unsafe" - - "github.com/mattn/go-pointer" -) - -//export go_ssl_crypto_ex_free -func go_ssl_crypto_ex_free( - parent *C.void, ptr unsafe.Pointer, - cryptoData *C.CRYPTO_EX_DATA, idx C.int, - argl C.long, argp *C.void, -) { - pointer.Unref(ptr) -} diff --git a/vendor/github.com/libp2p/go-openssl/bio.go b/vendor/github.com/libp2p/go-openssl/bio.go deleted file mode 100644 index caf2b37a..00000000 --- a/vendor/github.com/libp2p/go-openssl/bio.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "io" - "reflect" - "sync" - "unsafe" -) - -const ( - SSLRecordSize = 16 * 1024 -) - -func nonCopyGoBytes(ptr uintptr, length int) []byte { - var slice []byte - header := (*reflect.SliceHeader)(unsafe.Pointer(&slice)) - header.Cap = length - header.Len = length - header.Data = ptr - return slice -} - -func nonCopyCString(data *C.char, size C.int) []byte { - return nonCopyGoBytes(uintptr(unsafe.Pointer(data)), int(size)) -} - -var writeBioMapping = newMapping() - -type writeBio struct { - data_mtx sync.Mutex - op_mtx sync.Mutex - buf []byte - release_buffers bool -} - -func loadWritePtr(b *C.BIO) *writeBio { - t := token(C.X_BIO_get_data(b)) - return (*writeBio)(writeBioMapping.Get(t)) -} - -func bioClearRetryFlags(b *C.BIO) { - C.X_BIO_clear_flags(b, C.BIO_FLAGS_RWS|C.BIO_FLAGS_SHOULD_RETRY) -} - -func bioSetRetryRead(b *C.BIO) { - C.X_BIO_set_flags(b, C.BIO_FLAGS_READ|C.BIO_FLAGS_SHOULD_RETRY) -} - -//export go_write_bio_write -func go_write_bio_write(b *C.BIO, data *C.char, size C.int) (rc C.int) { - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: writeBioWrite panic'd: %v", err) - rc = -1 - } - }() - ptr := loadWritePtr(b) - if ptr == nil || data == nil || size < 0 { - return -1 - } - ptr.data_mtx.Lock() - defer ptr.data_mtx.Unlock() - bioClearRetryFlags(b) - ptr.buf = append(ptr.buf, nonCopyCString(data, size)...) 
- return size -} - -//export go_write_bio_ctrl -func go_write_bio_ctrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) ( - rc C.long) { - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: writeBioCtrl panic'd: %v", err) - rc = -1 - } - }() - switch cmd { - case C.BIO_CTRL_WPENDING: - return writeBioPending(b) - case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH: - return 1 - default: - return 0 - } -} - -func writeBioPending(b *C.BIO) C.long { - ptr := loadWritePtr(b) - if ptr == nil { - return 0 - } - ptr.data_mtx.Lock() - defer ptr.data_mtx.Unlock() - return C.long(len(ptr.buf)) -} - -func (wb *writeBio) WriteTo(w io.Writer) (rv int64, err error) { - wb.op_mtx.Lock() - defer wb.op_mtx.Unlock() - - // write whatever data we currently have - wb.data_mtx.Lock() - data := wb.buf - wb.data_mtx.Unlock() - - if len(data) == 0 { - return 0, nil - } - n, err := w.Write(data) - - // subtract however much data we wrote from the buffer - wb.data_mtx.Lock() - wb.buf = wb.buf[:copy(wb.buf, wb.buf[n:])] - if wb.release_buffers && len(wb.buf) == 0 { - wb.buf = nil - } - wb.data_mtx.Unlock() - - return int64(n), err -} - -func (wb *writeBio) Disconnect(b *C.BIO) { - if loadWritePtr(b) == wb { - writeBioMapping.Del(token(C.X_BIO_get_data(b))) - C.X_BIO_set_data(b, nil) - } -} - -func (wb *writeBio) MakeCBIO() *C.BIO { - rv := C.X_BIO_new_write_bio() - token := writeBioMapping.Add(unsafe.Pointer(wb)) - C.X_BIO_set_data(rv, unsafe.Pointer(token)) - return rv -} - -var readBioMapping = newMapping() - -type readBio struct { - data_mtx sync.Mutex - op_mtx sync.Mutex - buf []byte - eof bool - release_buffers bool -} - -func loadReadPtr(b *C.BIO) *readBio { - return (*readBio)(readBioMapping.Get(token(C.X_BIO_get_data(b)))) -} - -//export go_read_bio_read -func go_read_bio_read(b *C.BIO, data *C.char, size C.int) (rc C.int) { - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: go_read_bio_read panic'd: %v", err) - rc = -1 - } - }() - ptr := 
loadReadPtr(b) - if ptr == nil || size < 0 { - return -1 - } - ptr.data_mtx.Lock() - defer ptr.data_mtx.Unlock() - bioClearRetryFlags(b) - if len(ptr.buf) == 0 { - if ptr.eof { - return 0 - } - bioSetRetryRead(b) - return -1 - } - if size == 0 || data == nil { - return C.int(len(ptr.buf)) - } - n := copy(nonCopyCString(data, size), ptr.buf) - ptr.buf = ptr.buf[:copy(ptr.buf, ptr.buf[n:])] - if ptr.release_buffers && len(ptr.buf) == 0 { - ptr.buf = nil - } - return C.int(n) -} - -//export go_read_bio_ctrl -func go_read_bio_ctrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) ( - rc C.long) { - - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: readBioCtrl panic'd: %v", err) - rc = -1 - } - }() - switch cmd { - case C.BIO_CTRL_PENDING: - return readBioPending(b) - case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH: - return 1 - default: - return 0 - } -} - -func readBioPending(b *C.BIO) C.long { - ptr := loadReadPtr(b) - if ptr == nil { - return 0 - } - ptr.data_mtx.Lock() - defer ptr.data_mtx.Unlock() - return C.long(len(ptr.buf)) -} - -func (rb *readBio) ReadFromOnce(r io.Reader) (n int, err error) { - rb.op_mtx.Lock() - defer rb.op_mtx.Unlock() - - // make sure we have a destination that fits at least one SSL record - rb.data_mtx.Lock() - if cap(rb.buf) < len(rb.buf)+SSLRecordSize { - new_buf := make([]byte, len(rb.buf), len(rb.buf)+SSLRecordSize) - copy(new_buf, rb.buf) - rb.buf = new_buf - } - dst := rb.buf[len(rb.buf):cap(rb.buf)] - dst_slice := rb.buf - rb.data_mtx.Unlock() - - n, err = r.Read(dst) - rb.data_mtx.Lock() - defer rb.data_mtx.Unlock() - if n > 0 { - if len(dst_slice) != len(rb.buf) { - // someone shrunk the buffer, so we read in too far ahead and we - // need to slide backwards - copy(rb.buf[len(rb.buf):len(rb.buf)+n], dst) - } - rb.buf = rb.buf[:len(rb.buf)+n] - } - return n, err -} - -func (rb *readBio) MakeCBIO() *C.BIO { - rv := C.X_BIO_new_read_bio() - token := readBioMapping.Add(unsafe.Pointer(rb)) - C.X_BIO_set_data(rv, 
unsafe.Pointer(token)) - return rv -} - -func (rb *readBio) Disconnect(b *C.BIO) { - if loadReadPtr(b) == rb { - readBioMapping.Del(token(C.X_BIO_get_data(b))) - C.X_BIO_set_data(b, nil) - } -} - -func (rb *readBio) MarkEOF() { - rb.data_mtx.Lock() - defer rb.data_mtx.Unlock() - rb.eof = true -} - -type anyBio C.BIO - -func asAnyBio(b *C.BIO) *anyBio { return (*anyBio)(b) } - -func (b *anyBio) Read(buf []byte) (n int, err error) { - if len(buf) == 0 { - return 0, nil - } - n = int(C.X_BIO_read((*C.BIO)(b), unsafe.Pointer(&buf[0]), C.int(len(buf)))) - if n <= 0 { - return 0, io.EOF - } - return n, nil -} - -func (b *anyBio) Write(buf []byte) (written int, err error) { - if len(buf) == 0 { - return 0, nil - } - n := int(C.X_BIO_write((*C.BIO)(b), unsafe.Pointer(&buf[0]), - C.int(len(buf)))) - if n != len(buf) { - return n, errors.New("BIO write failed") - } - return n, nil -} diff --git a/vendor/github.com/libp2p/go-openssl/build.go b/vendor/github.com/libp2p/go-openssl/build.go deleted file mode 100644 index 990fbb4b..00000000 --- a/vendor/github.com/libp2p/go-openssl/build.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !openssl_static -// +build !openssl_static - -package openssl - -// #cgo linux windows freebsd openbsd solaris pkg-config: libssl libcrypto -// #cgo linux freebsd openbsd solaris CFLAGS: -Wno-deprecated-declarations -// #cgo darwin CFLAGS: -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/openssl/include -Wno-deprecated-declarations -// #cgo darwin LDFLAGS: -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/openssl/lib -lssl -lcrypto -// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN -import "C" diff --git a/vendor/github.com/libp2p/go-openssl/build_static.go b/vendor/github.com/libp2p/go-openssl/build_static.go deleted file mode 100644 index dde54461..00000000 --- a/vendor/github.com/libp2p/go-openssl/build_static.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build openssl_static -// +build openssl_static - -package openssl - -// #cgo linux windows freebsd openbsd solaris pkg-config: --static libssl libcrypto -// #cgo linux freebsd openbsd solaris CFLAGS: -Wno-deprecated-declarations -// #cgo darwin CFLAGS: -I/usr/local/opt/openssl@1.1/include -I/usr/local/opt/openssl/include -Wno-deprecated-declarations -// #cgo darwin LDFLAGS: -L/usr/local/opt/openssl@1.1/lib -L/usr/local/opt/openssl/lib -lssl -lcrypto -// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN -import "C" diff --git a/vendor/github.com/libp2p/go-openssl/cert.go b/vendor/github.com/libp2p/go-openssl/cert.go deleted file mode 100644 index 97c788f7..00000000 --- a/vendor/github.com/libp2p/go-openssl/cert.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "io/ioutil" - "math/big" - "runtime" - "time" - "unsafe" -) - -type EVP_MD int - -const ( - EVP_NULL EVP_MD = iota - EVP_MD5 EVP_MD = iota - EVP_MD4 EVP_MD = iota - EVP_SHA EVP_MD = iota - EVP_SHA1 EVP_MD = iota - EVP_DSS EVP_MD = iota - EVP_DSS1 EVP_MD = iota - EVP_MDC2 EVP_MD = iota - EVP_RIPEMD160 EVP_MD = iota - EVP_SHA224 EVP_MD = iota - EVP_SHA256 EVP_MD = iota - EVP_SHA384 EVP_MD = iota - EVP_SHA512 EVP_MD = iota -) - -// X509_Version represents a version on an x509 certificate. 
-type X509_Version int - -// Specify constants for x509 versions because the standard states that they -// are represented internally as one lower than the common version name. -const ( - X509_V1 X509_Version = 0 - X509_V3 X509_Version = 2 -) - -type Certificate struct { - x *C.X509 - Issuer *Certificate - ref interface{} - pubKey PublicKey -} - -type CertificateInfo struct { - Serial *big.Int - Issued time.Duration - Expires time.Duration - Country string - Organization string - CommonName string -} - -type Name struct { - name *C.X509_NAME -} - -// Allocate and return a new Name object. -func NewName() (*Name, error) { - n := C.X509_NAME_new() - if n == nil { - return nil, errors.New("could not create x509 name") - } - name := &Name{name: n} - runtime.SetFinalizer(name, func(n *Name) { - C.X509_NAME_free(n.name) - }) - return name, nil -} - -// AddTextEntry appends a text entry to an X509 NAME. -func (n *Name) AddTextEntry(field, value string) error { - cfield := C.CString(field) - defer C.free(unsafe.Pointer(cfield)) - cvalue := (*C.uchar)(unsafe.Pointer(C.CString(value))) - defer C.free(unsafe.Pointer(cvalue)) - ret := C.X509_NAME_add_entry_by_txt( - n.name, cfield, C.MBSTRING_ASC, cvalue, -1, -1, 0) - if ret != 1 { - return errors.New("failed to add x509 name text entry") - } - return nil -} - -// AddTextEntries allows adding multiple entries to a name in one call. -func (n *Name) AddTextEntries(entries map[string]string) error { - for f, v := range entries { - if err := n.AddTextEntry(f, v); err != nil { - return err - } - } - return nil -} - -// GetEntry returns a name entry based on NID. If no entry, then ("", false) is -// returned. 
-func (n *Name) GetEntry(nid NID) (entry string, ok bool) { - entrylen := C.X509_NAME_get_text_by_NID(n.name, C.int(nid), nil, 0) - if entrylen == -1 { - return "", false - } - buf := (*C.char)(C.malloc(C.size_t(entrylen + 1))) - defer C.free(unsafe.Pointer(buf)) - C.X509_NAME_get_text_by_NID(n.name, C.int(nid), buf, entrylen+1) - return C.GoStringN(buf, entrylen), true -} - -// NewCertificate generates a basic certificate based -// on the provided CertificateInfo struct -func NewCertificate(info *CertificateInfo, key PublicKey) (*Certificate, error) { - c := &Certificate{x: C.X509_new()} - runtime.SetFinalizer(c, func(c *Certificate) { - C.X509_free(c.x) - }) - - name, err := c.GetSubjectName() - if err != nil { - return nil, err - } - err = name.AddTextEntries(map[string]string{ - "C": info.Country, - "O": info.Organization, - "CN": info.CommonName, - }) - if err != nil { - return nil, err - } - // self-issue for now - if err := c.SetIssuerName(name); err != nil { - return nil, err - } - if err := c.SetSerial(info.Serial); err != nil { - return nil, err - } - if err := c.SetIssueDate(info.Issued); err != nil { - return nil, err - } - if err := c.SetExpireDate(info.Expires); err != nil { - return nil, err - } - if err := c.SetPubKey(key); err != nil { - return nil, err - } - return c, nil -} - -func (c *Certificate) GetSubjectName() (*Name, error) { - n := C.X509_get_subject_name(c.x) - if n == nil { - return nil, errors.New("failed to get subject name") - } - return &Name{name: n}, nil -} - -func (c *Certificate) GetIssuerName() (*Name, error) { - n := C.X509_get_issuer_name(c.x) - if n == nil { - return nil, errors.New("failed to get issuer name") - } - return &Name{name: n}, nil -} - -func (c *Certificate) SetSubjectName(name *Name) error { - if C.X509_set_subject_name(c.x, name.name) != 1 { - return errors.New("failed to set subject name") - } - return nil -} - -// SetIssuer updates the stored Issuer cert -// and the internal x509 Issuer Name of a certificate. 
-// The stored Issuer reference is used when adding extensions. -func (c *Certificate) SetIssuer(issuer *Certificate) error { - name, err := issuer.GetSubjectName() - if err != nil { - return err - } - if err = c.SetIssuerName(name); err != nil { - return err - } - c.Issuer = issuer - return nil -} - -// SetIssuerName populates the issuer name of a certificate. -// Use SetIssuer instead, if possible. -func (c *Certificate) SetIssuerName(name *Name) error { - if C.X509_set_issuer_name(c.x, name.name) != 1 { - return errors.New("failed to set subject name") - } - return nil -} - -// SetSerial sets the serial of a certificate. -func (c *Certificate) SetSerial(serial *big.Int) error { - sno := C.ASN1_INTEGER_new() - defer C.ASN1_INTEGER_free(sno) - bn := C.BN_new() - defer C.BN_free(bn) - - serialBytes := serial.Bytes() - if bn = C.BN_bin2bn((*C.uchar)(unsafe.Pointer(&serialBytes[0])), C.int(len(serialBytes)), bn); bn == nil { - return errors.New("failed to set serial") - } - if sno = C.BN_to_ASN1_INTEGER(bn, sno); sno == nil { - return errors.New("failed to set serial") - } - if C.X509_set_serialNumber(c.x, sno) != 1 { - return errors.New("failed to set serial") - } - return nil -} - -// SetIssueDate sets the certificate issue date relative to the current time. -func (c *Certificate) SetIssueDate(when time.Duration) error { - offset := C.long(when / time.Second) - result := C.X509_gmtime_adj(C.X_X509_get0_notBefore(c.x), offset) - if result == nil { - return errors.New("failed to set issue date") - } - return nil -} - -// SetExpireDate sets the certificate issue date relative to the current time. -func (c *Certificate) SetExpireDate(when time.Duration) error { - offset := C.long(when / time.Second) - result := C.X509_gmtime_adj(C.X_X509_get0_notAfter(c.x), offset) - if result == nil { - return errors.New("failed to set expire date") - } - return nil -} - -// SetPubKey assigns a new public key to a certificate. 
-func (c *Certificate) SetPubKey(pubKey PublicKey) error { - c.pubKey = pubKey - if C.X509_set_pubkey(c.x, pubKey.evpPKey()) != 1 { - return errors.New("failed to set public key") - } - return nil -} - -// Sign a certificate using a private key and a digest name. -// Accepted digest names are 'sha256', 'sha384', and 'sha512'. -func (c *Certificate) Sign(privKey PrivateKey, digest EVP_MD) error { - switch digest { - case EVP_SHA256: - case EVP_SHA384: - case EVP_SHA512: - default: - return errors.New("unsupported digest; " + - "you're probably looking for 'EVP_SHA256' or 'EVP_SHA512'") - } - return c.insecureSign(privKey, digest) -} - -func (c *Certificate) insecureSign(privKey PrivateKey, digest EVP_MD) error { - var md *C.EVP_MD = getDigestFunction(digest) - if C.X509_sign(c.x, privKey.evpPKey(), md) <= 0 { - return errors.New("failed to sign certificate") - } - return nil -} - -func getDigestFunction(digest EVP_MD) (md *C.EVP_MD) { - switch digest { - // please don't use these digest functions - case EVP_NULL: - md = C.X_EVP_md_null() - case EVP_MD5: - md = C.X_EVP_md5() - case EVP_SHA: - md = C.X_EVP_sha() - case EVP_SHA1: - md = C.X_EVP_sha1() - case EVP_DSS: - md = C.X_EVP_dss() - case EVP_DSS1: - md = C.X_EVP_dss1() - case EVP_RIPEMD160: - md = C.X_EVP_ripemd160() - case EVP_SHA224: - md = C.X_EVP_sha224() - // you actually want one of these - case EVP_SHA256: - md = C.X_EVP_sha256() - case EVP_SHA384: - md = C.X_EVP_sha384() - case EVP_SHA512: - md = C.X_EVP_sha512() - } - return md -} - -// Add an extension to a certificate. -// Extension constants are NID_* as found in openssl. 
-func (c *Certificate) AddExtension(nid NID, value string) error { - issuer := c - if c.Issuer != nil { - issuer = c.Issuer - } - var ctx C.X509V3_CTX - C.X509V3_set_ctx(&ctx, c.x, issuer.x, nil, nil, 0) - ex := C.X509V3_EXT_conf_nid(nil, &ctx, C.int(nid), C.CString(value)) - if ex == nil { - return errors.New("failed to create x509v3 extension") - } - defer C.X509_EXTENSION_free(ex) - if C.X509_add_ext(c.x, ex, -1) <= 0 { - return errors.New("failed to add x509v3 extension") - } - return nil -} - -// AddCustomExtension add custom extenstion to the certificate. -func (c *Certificate) AddCustomExtension(nid NID, value []byte) error { - val := (*C.char)(C.CBytes(value)) - defer C.free(unsafe.Pointer(val)) - if int(C.add_custom_ext(c.x, C.int(nid), val, C.int(len(value)))) == 0 { - return errors.New("unable to add extension") - } - return nil -} - -// Wraps AddExtension using a map of NID to text extension. -// Will return without finishing if it encounters an error. -func (c *Certificate) AddExtensions(extensions map[NID]string) error { - for nid, value := range extensions { - if err := c.AddExtension(nid, value); err != nil { - return err - } - } - return nil -} - -// LoadCertificateFromPEM loads an X509 certificate from a PEM-encoded block. 
-func LoadCertificateFromPEM(pem_block []byte) (*Certificate, error) { - if len(pem_block) == 0 { - return nil, errors.New("empty pem block") - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), - C.int(len(pem_block))) - cert := C.PEM_read_bio_X509(bio, nil, nil, nil) - C.BIO_free(bio) - if cert == nil { - return nil, errorFromErrorQueue() - } - x := &Certificate{x: cert} - runtime.SetFinalizer(x, func(x *Certificate) { - C.X509_free(x.x) - }) - return x, nil -} - -// MarshalPEM converts the X509 certificate to PEM-encoded format -func (c *Certificate) MarshalPEM() (pem_block []byte, err error) { - bio := C.BIO_new(C.BIO_s_mem()) - if bio == nil { - return nil, errors.New("failed to allocate memory BIO") - } - defer C.BIO_free(bio) - if int(C.PEM_write_bio_X509(bio, c.x)) != 1 { - return nil, errors.New("failed dumping certificate") - } - return ioutil.ReadAll(asAnyBio(bio)) -} - -// PublicKey returns the public key embedded in the X509 certificate. -func (c *Certificate) PublicKey() (PublicKey, error) { - pkey := C.X509_get_pubkey(c.x) - if pkey == nil { - return nil, errors.New("no public key found") - } - key := &pKey{key: pkey} - runtime.SetFinalizer(key, func(key *pKey) { - C.EVP_PKEY_free(key.key) - }) - return key, nil -} - -// GetSerialNumberHex returns the certificate's serial number in hex format -func (c *Certificate) GetSerialNumberHex() (serial string) { - asn1_i := C.X509_get_serialNumber(c.x) - bignum := C.ASN1_INTEGER_to_BN(asn1_i, nil) - hex := C.BN_bn2hex(bignum) - serial = C.GoString(hex) - C.BN_free(bignum) - C.X_OPENSSL_free(unsafe.Pointer(hex)) - return -} - -// GetVersion returns the X509 version of the certificate. -func (c *Certificate) GetVersion() X509_Version { - return X509_Version(C.X_X509_get_version(c.x)) -} - -// SetVersion sets the X509 version of the certificate. 
-func (c *Certificate) SetVersion(version X509_Version) error { - cvers := C.long(version) - if C.X_X509_set_version(c.x, cvers) != 1 { - return errors.New("failed to set certificate version") - } - return nil -} - -// GetExtensionValue returns the value of the given NID's extension. -func (c *Certificate) GetExtensionValue(nid NID) []byte { - dataLength := C.int(0) - val := C.get_extention(c.x, C.int(nid), &dataLength) - return C.GoBytes(unsafe.Pointer(val), dataLength) -} diff --git a/vendor/github.com/libp2p/go-openssl/ciphers.go b/vendor/github.com/libp2p/go-openssl/ciphers.go deleted file mode 100644 index a3a597c4..00000000 --- a/vendor/github.com/libp2p/go-openssl/ciphers.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "fmt" - "runtime" - "unsafe" -) - -const ( - GCM_TAG_MAXLEN = 16 -) - -type CipherCtx interface { - Cipher() *Cipher - BlockSize() int - KeySize() int - IVSize() int -} - -type Cipher struct { - ptr *C.EVP_CIPHER -} - -func (c *Cipher) Nid() NID { - return NID(C.X_EVP_CIPHER_nid(c.ptr)) -} - -func (c *Cipher) ShortName() (string, error) { - return Nid2ShortName(c.Nid()) -} - -func (c *Cipher) BlockSize() int { - return int(C.X_EVP_CIPHER_block_size(c.ptr)) -} - -func (c *Cipher) KeySize() int { - return int(C.X_EVP_CIPHER_key_length(c.ptr)) -} - -func (c *Cipher) IVSize() int { - return int(C.X_EVP_CIPHER_iv_length(c.ptr)) -} - -func Nid2ShortName(nid NID) (string, error) { - sn := C.OBJ_nid2sn(C.int(nid)) - if sn == nil { - return "", fmt.Errorf("NID %d not found", nid) - } - return C.GoString(sn), nil -} - -func GetCipherByName(name string) (*Cipher, error) { - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - p := C.EVP_get_cipherbyname(cname) - if p == nil { - return nil, fmt.Errorf("Cipher %v not found", name) - } - // we can consider ciphers to use static mem; don't need to free - return &Cipher{ptr: p}, nil -} - -func GetCipherByNid(nid NID) (*Cipher, error) { - sn, err := Nid2ShortName(nid) - if err != nil { - return nil, err - } - return GetCipherByName(sn) -} - -type cipherCtx struct { - ctx *C.EVP_CIPHER_CTX -} - -func newCipherCtx() (*cipherCtx, error) { - cctx := C.EVP_CIPHER_CTX_new() - if cctx == nil { - return nil, errors.New("failed to allocate cipher context") - } - ctx := &cipherCtx{cctx} - runtime.SetFinalizer(ctx, func(ctx *cipherCtx) { - C.EVP_CIPHER_CTX_free(ctx.ctx) - }) - return ctx, nil -} - -func (ctx *cipherCtx) applyKeyAndIV(key, iv []byte) error { - var kptr, iptr *C.uchar - if key != nil { - if len(key) != ctx.KeySize() { - return fmt.Errorf("bad key size (%d bytes instead of %d)", - len(key), ctx.KeySize()) - } - kptr = (*C.uchar)(&key[0]) 
- } - if iv != nil { - if len(iv) != ctx.IVSize() { - return fmt.Errorf("bad IV size (%d bytes instead of %d)", - len(iv), ctx.IVSize()) - } - iptr = (*C.uchar)(&iv[0]) - } - if kptr != nil || iptr != nil { - var res C.int - if C.X_EVP_CIPHER_CTX_encrypting(ctx.ctx) != 0 { - res = C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, kptr, iptr) - } else { - res = C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, kptr, iptr) - } - if res != 1 { - return errors.New("failed to apply key/IV") - } - } - return nil -} - -func (ctx *cipherCtx) Cipher() *Cipher { - return &Cipher{ptr: C.X_EVP_CIPHER_CTX_cipher(ctx.ctx)} -} - -func (ctx *cipherCtx) BlockSize() int { - return int(C.X_EVP_CIPHER_CTX_block_size(ctx.ctx)) -} - -func (ctx *cipherCtx) KeySize() int { - return int(C.X_EVP_CIPHER_CTX_key_length(ctx.ctx)) -} - -func (ctx *cipherCtx) IVSize() int { - return int(C.X_EVP_CIPHER_CTX_iv_length(ctx.ctx)) -} - -func (ctx *cipherCtx) SetPadding(pad bool) { - if pad { - C.X_EVP_CIPHER_CTX_set_padding(ctx.ctx, 1) - } else { - C.X_EVP_CIPHER_CTX_set_padding(ctx.ctx, 0) - } -} - -func (ctx *cipherCtx) setCtrl(code, arg int) error { - res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), nil) - if res != 1 { - return fmt.Errorf("failed to set code %d to %d [result %d]", - code, arg, res) - } - return nil -} - -func (ctx *cipherCtx) setCtrlBytes(code, arg int, value []byte) error { - res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), - unsafe.Pointer(&value[0])) - if res != 1 { - return fmt.Errorf("failed to set code %d with arg %d to %x [result %d]", - code, arg, value, res) - } - return nil -} - -func (ctx *cipherCtx) getCtrlInt(code, arg int) (int, error) { - var returnVal C.int - res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), - unsafe.Pointer(&returnVal)) - if res != 1 { - return 0, fmt.Errorf("failed to get code %d with arg %d [result %d]", - code, arg, res) - } - return int(returnVal), nil -} - -func (ctx *cipherCtx) getCtrlBytes(code, arg, expectsize int) 
([]byte, error) { - returnVal := make([]byte, expectsize) - res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), - unsafe.Pointer(&returnVal[0])) - if res != 1 { - return nil, fmt.Errorf("failed to get code %d with arg %d [result %d]", - code, arg, res) - } - return returnVal, nil -} - -type EncryptionCipherCtx interface { - CipherCtx - - // pass in plaintext, get back ciphertext. can be called - // multiple times as needed - EncryptUpdate(input []byte) ([]byte, error) - - // call after all plaintext has been passed in; may return - // additional ciphertext if needed to finish off a block - // or extra padding information - EncryptFinal() ([]byte, error) -} - -type DecryptionCipherCtx interface { - CipherCtx - - // pass in ciphertext, get back plaintext. can be called - // multiple times as needed - DecryptUpdate(input []byte) ([]byte, error) - - // call after all ciphertext has been passed in; may return - // additional plaintext if needed to finish off a block - DecryptFinal() ([]byte, error) -} - -type encryptionCipherCtx struct { - *cipherCtx -} - -type decryptionCipherCtx struct { - *cipherCtx -} - -func newEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( - *encryptionCipherCtx, error) { - if c == nil { - return nil, errors.New("null cipher not allowed") - } - ctx, err := newCipherCtx() - if err != nil { - return nil, err - } - var eptr *C.ENGINE - if e != nil { - eptr = e.e - } - if C.EVP_EncryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) != 1 { - return nil, errors.New("failed to initialize cipher context") - } - err = ctx.applyKeyAndIV(key, iv) - if err != nil { - return nil, err - } - return &encryptionCipherCtx{cipherCtx: ctx}, nil -} - -func newDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( - *decryptionCipherCtx, error) { - if c == nil { - return nil, errors.New("null cipher not allowed") - } - ctx, err := newCipherCtx() - if err != nil { - return nil, err - } - var eptr *C.ENGINE - if e != nil { - eptr = e.e - } - if 
C.EVP_DecryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) != 1 { - return nil, errors.New("failed to initialize cipher context") - } - err = ctx.applyKeyAndIV(key, iv) - if err != nil { - return nil, err - } - return &decryptionCipherCtx{cipherCtx: ctx}, nil -} - -func NewEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( - EncryptionCipherCtx, error) { - return newEncryptionCipherCtx(c, e, key, iv) -} - -func NewDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) ( - DecryptionCipherCtx, error) { - return newDecryptionCipherCtx(c, e, key, iv) -} - -func (ctx *encryptionCipherCtx) EncryptUpdate(input []byte) ([]byte, error) { - if len(input) == 0 { - return nil, nil - } - outbuf := make([]byte, len(input)+ctx.BlockSize()) - outlen := C.int(len(outbuf)) - res := C.EVP_EncryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen, - (*C.uchar)(&input[0]), C.int(len(input))) - if res != 1 { - return nil, fmt.Errorf("failed to encrypt [result %d]", res) - } - return outbuf[:outlen], nil -} - -func (ctx *decryptionCipherCtx) DecryptUpdate(input []byte) ([]byte, error) { - if len(input) == 0 { - return nil, nil - } - outbuf := make([]byte, len(input)+ctx.BlockSize()) - outlen := C.int(len(outbuf)) - res := C.EVP_DecryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen, - (*C.uchar)(&input[0]), C.int(len(input))) - if res != 1 { - return nil, fmt.Errorf("failed to decrypt [result %d]", res) - } - return outbuf[:outlen], nil -} - -func (ctx *encryptionCipherCtx) EncryptFinal() ([]byte, error) { - outbuf := make([]byte, ctx.BlockSize()) - var outlen C.int - if C.EVP_EncryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) != 1 { - return nil, errors.New("encryption failed") - } - return outbuf[:outlen], nil -} - -func (ctx *decryptionCipherCtx) DecryptFinal() ([]byte, error) { - outbuf := make([]byte, ctx.BlockSize()) - var outlen C.int - if C.EVP_DecryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) != 1 { - // this may mean the tag failed to verify- all previous 
plaintext - // returned must be considered faked and invalid - return nil, errors.New("decryption failed") - } - return outbuf[:outlen], nil -} diff --git a/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go b/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go deleted file mode 100644 index 06ba0fed..00000000 --- a/vendor/github.com/libp2p/go-openssl/ciphers_gcm.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include -import "C" - -import ( - "errors" - "fmt" -) - -type AuthenticatedEncryptionCipherCtx interface { - EncryptionCipherCtx - - // data passed in to ExtraData() is part of the final output; it is - // not encrypted itself, but is part of the authenticated data. 
when - // decrypting or authenticating, pass back with the decryption - // context's ExtraData() - ExtraData([]byte) error - - // use after finalizing encryption to get the authenticating tag - GetTag() ([]byte, error) -} - -type AuthenticatedDecryptionCipherCtx interface { - DecryptionCipherCtx - - // pass in any extra data that was added during encryption with the - // encryption context's ExtraData() - ExtraData([]byte) error - - // use before finalizing decryption to tell the library what the - // tag is expected to be - SetTag([]byte) error -} - -type authEncryptionCipherCtx struct { - *encryptionCipherCtx -} - -type authDecryptionCipherCtx struct { - *decryptionCipherCtx -} - -func getGCMCipher(blocksize int) (*Cipher, error) { - var cipherptr *C.EVP_CIPHER - switch blocksize { - case 256: - cipherptr = C.EVP_aes_256_gcm() - case 192: - cipherptr = C.EVP_aes_192_gcm() - case 128: - cipherptr = C.EVP_aes_128_gcm() - default: - return nil, fmt.Errorf("unknown block size %d", blocksize) - } - return &Cipher{ptr: cipherptr}, nil -} - -func NewGCMEncryptionCipherCtx(blocksize int, e *Engine, key, iv []byte) ( - AuthenticatedEncryptionCipherCtx, error) { - cipher, err := getGCMCipher(blocksize) - if err != nil { - return nil, err - } - ctx, err := newEncryptionCipherCtx(cipher, e, key, nil) - if err != nil { - return nil, err - } - if len(iv) > 0 { - err := ctx.setCtrl(C.EVP_CTRL_GCM_SET_IVLEN, len(iv)) - if err != nil { - return nil, fmt.Errorf("could not set IV len to %d: %s", - len(iv), err) - } - if C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, nil, - (*C.uchar)(&iv[0])) != 1 { - return nil, errors.New("failed to apply IV") - } - } - return &authEncryptionCipherCtx{encryptionCipherCtx: ctx}, nil -} - -func NewGCMDecryptionCipherCtx(blocksize int, e *Engine, key, iv []byte) ( - AuthenticatedDecryptionCipherCtx, error) { - cipher, err := getGCMCipher(blocksize) - if err != nil { - return nil, err - } - ctx, err := newDecryptionCipherCtx(cipher, e, key, nil) - if err != 
nil { - return nil, err - } - if len(iv) > 0 { - err := ctx.setCtrl(C.EVP_CTRL_GCM_SET_IVLEN, len(iv)) - if err != nil { - return nil, fmt.Errorf("could not set IV len to %d: %s", - len(iv), err) - } - if C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, nil, - (*C.uchar)(&iv[0])) != 1 { - return nil, errors.New("failed to apply IV") - } - } - return &authDecryptionCipherCtx{decryptionCipherCtx: ctx}, nil -} - -func (ctx *authEncryptionCipherCtx) ExtraData(aad []byte) error { - if aad == nil { - return nil - } - var outlen C.int - if C.EVP_EncryptUpdate(ctx.ctx, nil, &outlen, (*C.uchar)(&aad[0]), - C.int(len(aad))) != 1 { - return errors.New("failed to add additional authenticated data") - } - return nil -} - -func (ctx *authDecryptionCipherCtx) ExtraData(aad []byte) error { - if aad == nil { - return nil - } - var outlen C.int - if C.EVP_DecryptUpdate(ctx.ctx, nil, &outlen, (*C.uchar)(&aad[0]), - C.int(len(aad))) != 1 { - return errors.New("failed to add additional authenticated data") - } - return nil -} - -func (ctx *authEncryptionCipherCtx) GetTag() ([]byte, error) { - return ctx.getCtrlBytes(C.EVP_CTRL_GCM_GET_TAG, GCM_TAG_MAXLEN, - GCM_TAG_MAXLEN) -} - -func (ctx *authDecryptionCipherCtx) SetTag(tag []byte) error { - return ctx.setCtrlBytes(C.EVP_CTRL_GCM_SET_TAG, len(tag), tag) -} diff --git a/vendor/github.com/libp2p/go-openssl/conn.go b/vendor/github.com/libp2p/go-openssl/conn.go deleted file mode 100644 index fc9421ff..00000000 --- a/vendor/github.com/libp2p/go-openssl/conn.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "fmt" - "io" - "net" - "runtime" - "sync" - "time" - "unsafe" - - "github.com/libp2p/go-openssl/utils" - "github.com/mattn/go-pointer" -) - -var ( - errZeroReturn = errors.New("zero return") - errWantRead = errors.New("want read") - errWantWrite = errors.New("want write") - errTryAgain = errors.New("try again") -) - -type Conn struct { - *SSL - - conn net.Conn - ctx *Ctx // for gc - into_ssl *readBio - from_ssl *writeBio - is_shutdown bool - mtx sync.Mutex - want_read_future *utils.Future -} - -type VerifyResult int - -const ( - Ok VerifyResult = C.X509_V_OK - UnableToGetIssuerCert VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT - UnableToGetCrl VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL - UnableToDecryptCertSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE - UnableToDecryptCrlSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE - UnableToDecodeIssuerPublicKey VerifyResult = C.X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY - CertSignatureFailure VerifyResult = C.X509_V_ERR_CERT_SIGNATURE_FAILURE - CrlSignatureFailure VerifyResult = C.X509_V_ERR_CRL_SIGNATURE_FAILURE - CertNotYetValid VerifyResult = C.X509_V_ERR_CERT_NOT_YET_VALID - CertHasExpired VerifyResult = C.X509_V_ERR_CERT_HAS_EXPIRED - CrlNotYetValid VerifyResult = C.X509_V_ERR_CRL_NOT_YET_VALID - CrlHasExpired VerifyResult = C.X509_V_ERR_CRL_HAS_EXPIRED - ErrorInCertNotBeforeField VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD - ErrorInCertNotAfterField 
VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD - ErrorInCrlLastUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD - ErrorInCrlNextUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD - OutOfMem VerifyResult = C.X509_V_ERR_OUT_OF_MEM - DepthZeroSelfSignedCert VerifyResult = C.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT - SelfSignedCertInChain VerifyResult = C.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN - UnableToGetIssuerCertLocally VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY - UnableToVerifyLeafSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE - CertChainTooLong VerifyResult = C.X509_V_ERR_CERT_CHAIN_TOO_LONG - CertRevoked VerifyResult = C.X509_V_ERR_CERT_REVOKED - InvalidCa VerifyResult = C.X509_V_ERR_INVALID_CA - PathLengthExceeded VerifyResult = C.X509_V_ERR_PATH_LENGTH_EXCEEDED - InvalidPurpose VerifyResult = C.X509_V_ERR_INVALID_PURPOSE - CertUntrusted VerifyResult = C.X509_V_ERR_CERT_UNTRUSTED - CertRejected VerifyResult = C.X509_V_ERR_CERT_REJECTED - SubjectIssuerMismatch VerifyResult = C.X509_V_ERR_SUBJECT_ISSUER_MISMATCH - AkidSkidMismatch VerifyResult = C.X509_V_ERR_AKID_SKID_MISMATCH - AkidIssuerSerialMismatch VerifyResult = C.X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH - KeyusageNoCertsign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CERTSIGN - UnableToGetCrlIssuer VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER - UnhandledCriticalExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION - KeyusageNoCrlSign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CRL_SIGN - UnhandledCriticalCrlExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION - InvalidNonCa VerifyResult = C.X509_V_ERR_INVALID_NON_CA - ProxyPathLengthExceeded VerifyResult = C.X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED - KeyusageNoDigitalSignature VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE - ProxyCertificatesNotAllowed VerifyResult = C.X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED - InvalidExtension 
VerifyResult = C.X509_V_ERR_INVALID_EXTENSION - InvalidPolicyExtension VerifyResult = C.X509_V_ERR_INVALID_POLICY_EXTENSION - NoExplicitPolicy VerifyResult = C.X509_V_ERR_NO_EXPLICIT_POLICY - UnnestedResource VerifyResult = C.X509_V_ERR_UNNESTED_RESOURCE - ApplicationVerification VerifyResult = C.X509_V_ERR_APPLICATION_VERIFICATION -) - -func newSSL(ctx *C.SSL_CTX) (*C.SSL, error) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - ssl := C.SSL_new(ctx) - if ssl == nil { - return nil, errorFromErrorQueue() - } - return ssl, nil -} - -func newConn(conn net.Conn, ctx *Ctx) (*Conn, error) { - ssl, err := newSSL(ctx.ctx) - if err != nil { - return nil, err - } - - into_ssl := &readBio{} - from_ssl := &writeBio{} - - if ctx.GetMode()&ReleaseBuffers > 0 { - into_ssl.release_buffers = true - from_ssl.release_buffers = true - } - - into_ssl_cbio := into_ssl.MakeCBIO() - from_ssl_cbio := from_ssl.MakeCBIO() - if into_ssl_cbio == nil || from_ssl_cbio == nil { - // these frees are null safe - C.BIO_free(into_ssl_cbio) - C.BIO_free(from_ssl_cbio) - C.SSL_free(ssl) - return nil, errors.New("failed to allocate memory BIO") - } - - // the ssl object takes ownership of these objects now - C.SSL_set_bio(ssl, into_ssl_cbio, from_ssl_cbio) - - s := &SSL{ssl: ssl} - C.SSL_set_ex_data(s.ssl, get_ssl_idx(), pointer.Save(s)) - - c := &Conn{ - SSL: s, - - conn: conn, - ctx: ctx, - into_ssl: into_ssl, - from_ssl: from_ssl} - runtime.SetFinalizer(c, func(c *Conn) { - c.into_ssl.Disconnect(into_ssl_cbio) - c.from_ssl.Disconnect(from_ssl_cbio) - C.SSL_free(c.ssl) - }) - return c, nil -} - -// Client wraps an existing stream connection and puts it in the connect state -// for any subsequent handshakes. -// -// IMPORTANT NOTE: if you use this method instead of Dial to construct an SSL -// connection, you are responsible for verifying the peer's hostname. -// Otherwise, you are vulnerable to MITM attacks. -// -// Client also does not set up SNI for you like Dial does. 
-// -// Client connections probably won't work for you unless you set a verify -// location or add some certs to the certificate store of the client context -// you're using. This library is not nice enough to use the system certificate -// store by default for you yet. -func Client(conn net.Conn, ctx *Ctx) (*Conn, error) { - c, err := newConn(conn, ctx) - if err != nil { - return nil, err - } - C.SSL_set_connect_state(c.ssl) - return c, nil -} - -// Server wraps an existing stream connection and puts it in the accept state -// for any subsequent handshakes. -func Server(conn net.Conn, ctx *Ctx) (*Conn, error) { - c, err := newConn(conn, ctx) - if err != nil { - return nil, err - } - C.SSL_set_accept_state(c.ssl) - return c, nil -} - -func (c *Conn) GetCtx() *Ctx { return c.ctx } - -func (c *Conn) CurrentCipher() (string, error) { - p := C.X_SSL_get_cipher_name(c.ssl) - if p == nil { - return "", errors.New("session not established") - } - - return C.GoString(p), nil -} - -func (c *Conn) fillInputBuffer() error { - for { - n, err := c.into_ssl.ReadFromOnce(c.conn) - if n == 0 && err == nil { - continue - } - if err == io.EOF { - c.into_ssl.MarkEOF() - return c.Close() - } - return err - } -} - -func (c *Conn) flushOutputBuffer() error { - _, err := c.from_ssl.WriteTo(c.conn) - return err -} - -func (c *Conn) getErrorHandler(rv C.int, errno error) func() error { - errcode := C.SSL_get_error(c.ssl, rv) - switch errcode { - case C.SSL_ERROR_ZERO_RETURN: - return func() error { - c.Close() - return io.ErrUnexpectedEOF - } - case C.SSL_ERROR_WANT_READ: - go c.flushOutputBuffer() - if c.want_read_future != nil { - want_read_future := c.want_read_future - return func() error { - _, err := want_read_future.Get() - return err - } - } - c.want_read_future = utils.NewFuture() - want_read_future := c.want_read_future - return func() (err error) { - defer func() { - c.mtx.Lock() - c.want_read_future = nil - c.mtx.Unlock() - want_read_future.Set(nil, err) - }() - err = 
c.fillInputBuffer() - if err != nil { - return err - } - return errTryAgain - } - case C.SSL_ERROR_WANT_WRITE: - return func() error { - err := c.flushOutputBuffer() - if err != nil { - return err - } - return errTryAgain - } - case C.SSL_ERROR_SYSCALL: - var err error - if C.ERR_peek_error() == 0 { - switch rv { - case 0: - err = errors.New("protocol-violating EOF") - case -1: - err = errno - default: - err = errorFromErrorQueue() - } - } else { - err = errorFromErrorQueue() - } - return func() error { return err } - default: - err := errorFromErrorQueue() - return func() error { return err } - } -} - -func (c *Conn) handleError(errcb func() error) error { - if errcb != nil { - return errcb() - } - return nil -} - -func (c *Conn) handshake() func() error { - c.mtx.Lock() - defer c.mtx.Unlock() - if c.is_shutdown { - return func() error { return io.ErrUnexpectedEOF } - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - rv, errno := C.SSL_do_handshake(c.ssl) - if rv > 0 { - return nil - } - return c.getErrorHandler(rv, errno) -} - -// Handshake performs an SSL handshake. If a handshake is not manually -// triggered, it will run before the first I/O on the encrypted stream. -func (c *Conn) Handshake() error { - err := errTryAgain - for err == errTryAgain { - err = c.handleError(c.handshake()) - } - go c.flushOutputBuffer() - return err -} - -// PeerCertificate returns the Certificate of the peer with which you're -// communicating. Only valid after a handshake. 
-func (c *Conn) PeerCertificate() (*Certificate, error) { - c.mtx.Lock() - defer c.mtx.Unlock() - if c.is_shutdown { - return nil, errors.New("connection closed") - } - x := C.SSL_get_peer_certificate(c.ssl) - if x == nil { - return nil, errors.New("no peer certificate found") - } - cert := &Certificate{x: x} - runtime.SetFinalizer(cert, func(cert *Certificate) { - C.X509_free(cert.x) - }) - return cert, nil -} - -// loadCertificateStack loads up a stack of x509 certificates and returns them, -// handling memory ownership. -func (c *Conn) loadCertificateStack(sk *C.struct_stack_st_X509) ( - rv []*Certificate) { - - sk_num := int(C.X_sk_X509_num(sk)) - rv = make([]*Certificate, 0, sk_num) - for i := 0; i < sk_num; i++ { - x := C.X_sk_X509_value(sk, C.int(i)) - // ref holds on to the underlying connection memory so we don't need to - // worry about incrementing refcounts manually or freeing the X509 - rv = append(rv, &Certificate{x: x, ref: c}) - } - return rv -} - -// PeerCertificateChain returns the certificate chain of the peer. If called on -// the client side, the stack also contains the peer's certificate; if called -// on the server side, the peer's certificate must be obtained separately using -// PeerCertificate. 
-func (c *Conn) PeerCertificateChain() (rv []*Certificate, err error) { - c.mtx.Lock() - defer c.mtx.Unlock() - if c.is_shutdown { - return nil, errors.New("connection closed") - } - sk := C.SSL_get_peer_cert_chain(c.ssl) - if sk == nil { - return nil, errors.New("no peer certificates found") - } - return c.loadCertificateStack(sk), nil -} - -type ConnectionState struct { - Certificate *Certificate - CertificateError error - CertificateChain []*Certificate - CertificateChainError error - SessionReused bool -} - -func (c *Conn) ConnectionState() (rv ConnectionState) { - rv.Certificate, rv.CertificateError = c.PeerCertificate() - rv.CertificateChain, rv.CertificateChainError = c.PeerCertificateChain() - rv.SessionReused = c.SessionReused() - return -} - -func (c *Conn) shutdown() func() error { - c.mtx.Lock() - defer c.mtx.Unlock() - runtime.LockOSThread() - defer runtime.UnlockOSThread() - rv, errno := C.SSL_shutdown(c.ssl) - if rv > 0 { - return nil - } - if rv == 0 { - // The OpenSSL docs say that in this case, the shutdown is not - // finished, and we should call SSL_shutdown() a second time, if a - // bidirectional shutdown is going to be performed. Further, the - // output of SSL_get_error may be misleading, as an erroneous - // SSL_ERROR_SYSCALL may be flagged even though no error occurred. - // So, TODO: revisit bidrectional shutdown, possibly trying again. - // Note: some broken clients won't engage in bidirectional shutdown - // without tickling them to close by sending a TCP_FIN packet, or - // shutting down the write-side of the connection. 
- return nil - } else { - return c.getErrorHandler(rv, errno) - } -} - -func (c *Conn) shutdownLoop() error { - err := errTryAgain - shutdown_tries := 0 - for err == errTryAgain { - shutdown_tries = shutdown_tries + 1 - err = c.handleError(c.shutdown()) - if err == nil { - return c.flushOutputBuffer() - } - if err == errTryAgain && shutdown_tries >= 2 { - return errors.New("shutdown requested a third time?") - } - } - if err == io.ErrUnexpectedEOF { - err = nil - } - return err -} - -// Close shuts down the SSL connection and closes the underlying wrapped -// connection. -func (c *Conn) Close() error { - c.mtx.Lock() - if c.is_shutdown { - c.mtx.Unlock() - return nil - } - c.is_shutdown = true - c.mtx.Unlock() - var errs utils.ErrorGroup - errs.Add(c.shutdownLoop()) - errs.Add(c.conn.Close()) - return errs.Finalize() -} - -func (c *Conn) read(b []byte) (int, func() error) { - if len(b) == 0 { - return 0, nil - } - c.mtx.Lock() - defer c.mtx.Unlock() - if c.is_shutdown { - return 0, func() error { return io.EOF } - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - rv, errno := C.SSL_read(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b))) - if rv > 0 { - return int(rv), nil - } - return 0, c.getErrorHandler(rv, errno) -} - -// Read reads up to len(b) bytes into b. It returns the number of bytes read -// and an error if applicable. io.EOF is returned when the caller can expect -// to see no more data. 
-func (c *Conn) Read(b []byte) (n int, err error) { - if len(b) == 0 { - return 0, nil - } - err = errTryAgain - for err == errTryAgain { - n, errcb := c.read(b) - err = c.handleError(errcb) - if err == nil { - go c.flushOutputBuffer() - return n, nil - } - if err == io.ErrUnexpectedEOF { - err = io.EOF - } - } - return 0, err -} - -func (c *Conn) write(b []byte) (int, func() error) { - if len(b) == 0 { - return 0, nil - } - c.mtx.Lock() - defer c.mtx.Unlock() - if c.is_shutdown { - err := errors.New("connection closed") - return 0, func() error { return err } - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - rv, errno := C.SSL_write(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b))) - if rv > 0 { - return int(rv), nil - } - return 0, c.getErrorHandler(rv, errno) -} - -// Write will encrypt the contents of b and write it to the underlying stream. -// Performance will be vastly improved if the size of b is a multiple of -// SSLRecordSize. -func (c *Conn) Write(b []byte) (written int, err error) { - if len(b) == 0 { - return 0, nil - } - err = errTryAgain - for err == errTryAgain { - n, errcb := c.write(b) - err = c.handleError(errcb) - if err == nil { - return n, c.flushOutputBuffer() - } - } - return 0, err -} - -// VerifyHostname pulls the PeerCertificate and calls VerifyHostname on the -// certificate. -func (c *Conn) VerifyHostname(host string) error { - cert, err := c.PeerCertificate() - if err != nil { - return err - } - return cert.VerifyHostname(host) -} - -// LocalAddr returns the underlying connection's local address -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the underlying connection's remote address -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// SetDeadline calls SetDeadline on the underlying connection. -func (c *Conn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -// SetReadDeadline calls SetReadDeadline on the underlying connection. 
-func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetWriteDeadline calls SetWriteDeadline on the underlying connection. -func (c *Conn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -func (c *Conn) UnderlyingConn() net.Conn { - return c.conn -} - -func (c *Conn) SetTlsExtHostName(name string) error { - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - runtime.LockOSThread() - defer runtime.UnlockOSThread() - if C.X_SSL_set_tlsext_host_name(c.ssl, cname) == 0 { - return errorFromErrorQueue() - } - return nil -} - -func (c *Conn) VerifyResult() VerifyResult { - return VerifyResult(C.SSL_get_verify_result(c.ssl)) -} - -func (c *Conn) SessionReused() bool { - return C.X_SSL_session_reused(c.ssl) == 1 -} - -func (c *Conn) GetSession() ([]byte, error) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // get1 increases the refcount of the session, so we have to free it. - session := (*C.SSL_SESSION)(C.SSL_get1_session(c.ssl)) - if session == nil { - return nil, errors.New("failed to get session") - } - defer C.SSL_SESSION_free(session) - - // get the size of the encoding - slen := C.i2d_SSL_SESSION(session, nil) - - buf := (*C.uchar)(C.malloc(C.size_t(slen))) - defer C.free(unsafe.Pointer(buf)) - - // this modifies the value of buf (seriously), so we have to pass in a temp - // var so that we can actually read the bytes from buf. 
- tmp := buf - slen2 := C.i2d_SSL_SESSION(session, &tmp) - if slen != slen2 { - return nil, errors.New("session had different lengths") - } - - return C.GoBytes(unsafe.Pointer(buf), slen), nil -} - -func (c *Conn) setSession(session []byte) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - ptr := (*C.uchar)(&session[0]) - s := C.d2i_SSL_SESSION(nil, &ptr, C.long(len(session))) - if s == nil { - return fmt.Errorf("unable to load session: %s", errorFromErrorQueue()) - } - defer C.SSL_SESSION_free(s) - - ret := C.SSL_set_session(c.ssl, s) - if ret != 1 { - return fmt.Errorf("unable to set session: %s", errorFromErrorQueue()) - } - return nil -} diff --git a/vendor/github.com/libp2p/go-openssl/ctx.go b/vendor/github.com/libp2p/go-openssl/ctx.go deleted file mode 100644 index 3bebf0d5..00000000 --- a/vendor/github.com/libp2p/go-openssl/ctx.go +++ /dev/null @@ -1,618 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "runtime" - "sync" - "time" - "unsafe" - - "github.com/mattn/go-pointer" - "github.com/spacemonkeygo/spacelog" -) - -var ( - ssl_ctx_idx = C.X_SSL_CTX_new_index() - - logger = spacelog.GetLogger() -) - -type Ctx struct { - ctx *C.SSL_CTX - cert *Certificate - chain []*Certificate - key PrivateKey - verify_cb VerifyCallback - sni_cb TLSExtServernameCallback - - ticket_store_mu sync.Mutex - ticket_store *TicketStore -} - -//export get_ssl_ctx_idx -func get_ssl_ctx_idx() C.int { - return ssl_ctx_idx -} - -func newCtx(method *C.SSL_METHOD) (*Ctx, error) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - ctx := C.SSL_CTX_new(method) - if ctx == nil { - return nil, errorFromErrorQueue() - } - c := &Ctx{ctx: ctx} - C.SSL_CTX_set_ex_data(ctx, get_ssl_ctx_idx(), pointer.Save(c)) - runtime.SetFinalizer(c, func(c *Ctx) { - C.SSL_CTX_free(c.ctx) - }) - return c, nil -} - -type SSLVersion int - -const ( - SSLv3 SSLVersion = 0x02 // Vulnerable to "POODLE" attack. - TLSv1 SSLVersion = 0x03 - TLSv1_1 SSLVersion = 0x04 - TLSv1_2 SSLVersion = 0x05 - - // Make sure to disable SSLv2 and SSLv3 if you use this. SSLv3 is vulnerable - // to the "POODLE" attack, and SSLv2 is what, just don't even. - AnyVersion SSLVersion = 0x06 -) - -// NewCtxWithVersion creates an SSL context that is specific to the provided -// SSL version. See http://www.openssl.org/docs/ssl/SSL_CTX_new.html for more. 
-func NewCtxWithVersion(version SSLVersion) (*Ctx, error) { - var method *C.SSL_METHOD - switch version { - case SSLv3: - method = C.X_SSLv3_method() - case TLSv1: - method = C.X_TLSv1_method() - case TLSv1_1: - method = C.X_TLSv1_1_method() - case TLSv1_2: - method = C.X_TLSv1_2_method() - case AnyVersion: - method = C.X_SSLv23_method() - } - if method == nil { - return nil, errors.New("unknown ssl/tls version") - } - return newCtx(method) -} - -// NewCtx creates a context that supports any TLS version 1.0 and newer. -func NewCtx() (*Ctx, error) { - c, err := NewCtxWithVersion(AnyVersion) - if err == nil { - c.SetOptions(NoSSLv2 | NoSSLv3) - } - return c, err -} - -// NewCtxFromFiles calls NewCtx, loads the provided files, and configures the -// context to use them. -func NewCtxFromFiles(cert_file string, key_file string) (*Ctx, error) { - ctx, err := NewCtx() - if err != nil { - return nil, err - } - - cert_bytes, err := ioutil.ReadFile(cert_file) - if err != nil { - return nil, err - } - - certs := SplitPEM(cert_bytes) - if len(certs) == 0 { - return nil, fmt.Errorf("no PEM certificate found in '%s'", cert_file) - } - first, certs := certs[0], certs[1:] - cert, err := LoadCertificateFromPEM(first) - if err != nil { - return nil, err - } - - err = ctx.UseCertificate(cert) - if err != nil { - return nil, err - } - - for _, pem := range certs { - cert, err := LoadCertificateFromPEM(pem) - if err != nil { - return nil, err - } - err = ctx.AddChainCertificate(cert) - if err != nil { - return nil, err - } - } - - key_bytes, err := ioutil.ReadFile(key_file) - if err != nil { - return nil, err - } - - key, err := LoadPrivateKeyFromPEM(key_bytes) - if err != nil { - return nil, err - } - - err = ctx.UsePrivateKey(key) - if err != nil { - return nil, err - } - - return ctx, nil -} - -// EllipticCurve repesents the ASN.1 OID of an elliptic curve. -// see https://www.openssl.org/docs/apps/ecparam.html for a list of implemented curves. 
-type EllipticCurve int - -const ( - // P-256: X9.62/SECG curve over a 256 bit prime field - Prime256v1 EllipticCurve = C.NID_X9_62_prime256v1 - // P-384: NIST/SECG curve over a 384 bit prime field - Secp384r1 EllipticCurve = C.NID_secp384r1 - // P-521: NIST/SECG curve over a 521 bit prime field - Secp521r1 EllipticCurve = C.NID_secp521r1 -) - -// SetEllipticCurve sets the elliptic curve used by the SSL context to -// enable an ECDH cipher suite to be selected during the handshake. -func (c *Ctx) SetEllipticCurve(curve EllipticCurve) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - k := C.EC_KEY_new_by_curve_name(C.int(curve)) - if k == nil { - return errors.New("unknown curve") - } - defer C.EC_KEY_free(k) - - if int(C.X_SSL_CTX_set_tmp_ecdh(c.ctx, k)) != 1 { - return errorFromErrorQueue() - } - - return nil -} - -// UseCertificate configures the context to present the given certificate to -// peers. -func (c *Ctx) UseCertificate(cert *Certificate) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - c.cert = cert - if int(C.SSL_CTX_use_certificate(c.ctx, cert.x)) != 1 { - return errorFromErrorQueue() - } - return nil -} - -// AddChainCertificate adds a certificate to the chain presented in the -// handshake. -func (c *Ctx) AddChainCertificate(cert *Certificate) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - c.chain = append(c.chain, cert) - if int(C.X_SSL_CTX_add_extra_chain_cert(c.ctx, cert.x)) != 1 { - return errorFromErrorQueue() - } - // OpenSSL takes ownership via SSL_CTX_add_extra_chain_cert - runtime.SetFinalizer(cert, nil) - return nil -} - -// UsePrivateKey configures the context to use the given private key for SSL -// handshakes. 
-func (c *Ctx) UsePrivateKey(key PrivateKey) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - c.key = key - if int(C.SSL_CTX_use_PrivateKey(c.ctx, key.evpPKey())) != 1 { - return errorFromErrorQueue() - } - return nil -} - -type CertificateStore struct { - store *C.X509_STORE - // for GC - ctx *Ctx - certs []*Certificate -} - -// Allocate a new, empty CertificateStore -func NewCertificateStore() (*CertificateStore, error) { - s := C.X509_STORE_new() - if s == nil { - return nil, errors.New("failed to allocate X509_STORE") - } - store := &CertificateStore{store: s} - runtime.SetFinalizer(store, func(s *CertificateStore) { - C.X509_STORE_free(s.store) - }) - return store, nil -} - -// Parse a chained PEM file, loading all certificates into the Store. -func (s *CertificateStore) LoadCertificatesFromPEM(data []byte) error { - pems := SplitPEM(data) - for _, pem := range pems { - cert, err := LoadCertificateFromPEM(pem) - if err != nil { - return err - } - err = s.AddCertificate(cert) - if err != nil { - return err - } - } - return nil -} - -// GetCertificateStore returns the context's certificate store that will be -// used for peer validation. -func (c *Ctx) GetCertificateStore() *CertificateStore { - // we don't need to dealloc the cert store pointer here, because it points - // to a ctx internal. so we do need to keep the ctx around - return &CertificateStore{ - store: C.SSL_CTX_get_cert_store(c.ctx), - ctx: c} -} - -// AddCertificate marks the provided Certificate as a trusted certificate in -// the given CertificateStore. 
-func (s *CertificateStore) AddCertificate(cert *Certificate) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - s.certs = append(s.certs, cert) - if int(C.X509_STORE_add_cert(s.store, cert.x)) != 1 { - return errorFromErrorQueue() - } - return nil -} - -type CertificateStoreCtx struct { - ctx *C.X509_STORE_CTX - ssl_ctx *Ctx -} - -func (csc *CertificateStoreCtx) VerifyResult() VerifyResult { - return VerifyResult(C.X509_STORE_CTX_get_error(csc.ctx)) -} - -func (csc *CertificateStoreCtx) Err() error { - code := C.X509_STORE_CTX_get_error(csc.ctx) - if code == C.X509_V_OK { - return nil - } - return fmt.Errorf("openssl: %s", - C.GoString(C.X509_verify_cert_error_string(C.long(code)))) -} - -func (csc *CertificateStoreCtx) Depth() int { - return int(C.X509_STORE_CTX_get_error_depth(csc.ctx)) -} - -// the certificate returned is only valid for the lifetime of the underlying -// X509_STORE_CTX -func (csc *CertificateStoreCtx) GetCurrentCert() *Certificate { - x509 := C.X509_STORE_CTX_get_current_cert(csc.ctx) - if x509 == nil { - return nil - } - // add a ref - if C.X_X509_add_ref(x509) != 1 { - return nil - } - cert := &Certificate{ - x: x509, - } - runtime.SetFinalizer(cert, func(cert *Certificate) { - C.X509_free(cert.x) - }) - return cert -} - -// LoadVerifyLocations tells the context to trust all certificate authorities -// provided in either the ca_file or the ca_path. -// See http://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html for -// more. 
-func (c *Ctx) LoadVerifyLocations(ca_file string, ca_path string) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - var c_ca_file, c_ca_path *C.char - if ca_file != "" { - c_ca_file = C.CString(ca_file) - defer C.free(unsafe.Pointer(c_ca_file)) - } - if ca_path != "" { - c_ca_path = C.CString(ca_path) - defer C.free(unsafe.Pointer(c_ca_path)) - } - if C.SSL_CTX_load_verify_locations(c.ctx, c_ca_file, c_ca_path) != 1 { - return errorFromErrorQueue() - } - return nil -} - -type Version int - -const ( - SSL3_VERSION Version = C.SSL3_VERSION - TLS1_VERSION Version = C.TLS1_VERSION - TLS1_1_VERSION Version = C.TLS1_1_VERSION - TLS1_2_VERSION Version = C.TLS1_2_VERSION - TLS1_3_VERSION Version = C.TLS1_3_VERSION - DTLS1_VERSION Version = C.DTLS1_VERSION - DTLS1_2_VERSION Version = C.DTLS1_2_VERSION -) - -// SetMinProtoVersion sets the minimum supported protocol version for the Ctx. -// http://www.openssl.org/docs/ssl/SSL_CTX_set_min_proto_version.html -func (c *Ctx) SetMinProtoVersion(version Version) bool { - return C.X_SSL_CTX_set_min_proto_version( - c.ctx, C.int(version)) == 1 -} - -// SetMaxProtoVersion sets the maximum supported protocol version for the Ctx. -// http://www.openssl.org/docs/ssl/SSL_CTX_set_max_proto_version.html -func (c *Ctx) SetMaxProtoVersion(version Version) bool { - return C.X_SSL_CTX_set_max_proto_version( - c.ctx, C.int(version)) == 1 -} - -type Options int - -const ( - // NoCompression is only valid if you are using OpenSSL 1.0.1 or newer - NoCompression Options = C.SSL_OP_NO_COMPRESSION - NoSSLv2 Options = C.SSL_OP_NO_SSLv2 - NoSSLv3 Options = C.SSL_OP_NO_SSLv3 - NoTLSv1 Options = C.SSL_OP_NO_TLSv1 - CipherServerPreference Options = C.SSL_OP_CIPHER_SERVER_PREFERENCE - NoSessionResumptionOrRenegotiation Options = C.SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION - NoTicket Options = C.SSL_OP_NO_TICKET -) - -// SetOptions sets context options. 
See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html -func (c *Ctx) SetOptions(options Options) Options { - return Options(C.X_SSL_CTX_set_options( - c.ctx, C.long(options))) -} - -func (c *Ctx) ClearOptions(options Options) Options { - return Options(C.X_SSL_CTX_clear_options( - c.ctx, C.long(options))) -} - -// GetOptions returns context options. See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html -func (c *Ctx) GetOptions() Options { - return Options(C.X_SSL_CTX_get_options(c.ctx)) -} - -type Modes int - -const ( - // ReleaseBuffers is only valid if you are using OpenSSL 1.0.1 or newer - ReleaseBuffers Modes = C.SSL_MODE_RELEASE_BUFFERS -) - -// SetMode sets context modes. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html -func (c *Ctx) SetMode(modes Modes) Modes { - return Modes(C.X_SSL_CTX_set_mode(c.ctx, C.long(modes))) -} - -// GetMode returns context modes. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html -func (c *Ctx) GetMode() Modes { - return Modes(C.X_SSL_CTX_get_mode(c.ctx)) -} - -type VerifyOptions int - -const ( - VerifyNone VerifyOptions = C.SSL_VERIFY_NONE - VerifyPeer VerifyOptions = C.SSL_VERIFY_PEER - VerifyFailIfNoPeerCert VerifyOptions = C.SSL_VERIFY_FAIL_IF_NO_PEER_CERT - VerifyClientOnce VerifyOptions = C.SSL_VERIFY_CLIENT_ONCE -) - -type VerifyCallback func(ok bool, store *CertificateStoreCtx) bool - -//export go_ssl_ctx_verify_cb_thunk -func go_ssl_ctx_verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int { - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: verify callback panic'd: %v", err) - os.Exit(1) - } - }() - verify_cb := pointer.Restore(p).(*Ctx).verify_cb - // set up defaults just in case verify_cb is nil - if verify_cb != nil { - store := &CertificateStoreCtx{ctx: ctx} - if verify_cb(ok == 1, store) { - ok = 1 - } else { - ok = 0 - } - } - return ok -} - -// SetVerify controls peer verification settings. 
See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (c *Ctx) SetVerify(options VerifyOptions, verify_cb VerifyCallback) { - c.verify_cb = verify_cb - if verify_cb != nil { - C.SSL_CTX_set_verify(c.ctx, C.int(options), (*[0]byte)(C.X_SSL_CTX_verify_cb)) - } else { - C.SSL_CTX_set_verify(c.ctx, C.int(options), nil) - } -} - -func (c *Ctx) SetVerifyMode(options VerifyOptions) { - c.SetVerify(options, c.verify_cb) -} - -func (c *Ctx) SetVerifyCallback(verify_cb VerifyCallback) { - c.SetVerify(c.VerifyMode(), verify_cb) -} - -func (c *Ctx) GetVerifyCallback() VerifyCallback { - return c.verify_cb -} - -func (c *Ctx) VerifyMode() VerifyOptions { - return VerifyOptions(C.SSL_CTX_get_verify_mode(c.ctx)) -} - -// SetVerifyDepth controls how many certificates deep the certificate -// verification logic is willing to follow a certificate chain. See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (c *Ctx) SetVerifyDepth(depth int) { - C.SSL_CTX_set_verify_depth(c.ctx, C.int(depth)) -} - -// GetVerifyDepth controls how many certificates deep the certificate -// verification logic is willing to follow a certificate chain. See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (c *Ctx) GetVerifyDepth() int { - return int(C.SSL_CTX_get_verify_depth(c.ctx)) -} - -type TLSExtServernameCallback func(ssl *SSL) SSLTLSExtErr - -// SetTLSExtServernameCallback sets callback function for Server Name Indication -// (SNI) rfc6066 (http://tools.ietf.org/html/rfc6066). 
See -// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni -func (c *Ctx) SetTLSExtServernameCallback(sni_cb TLSExtServernameCallback) { - c.sni_cb = sni_cb - C.X_SSL_CTX_set_tlsext_servername_callback(c.ctx, (*[0]byte)(C.sni_cb)) -} - -func (c *Ctx) SetSessionId(session_id []byte) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - var ptr *C.uchar - if len(session_id) > 0 { - ptr = (*C.uchar)(unsafe.Pointer(&session_id[0])) - } - if int(C.SSL_CTX_set_session_id_context(c.ctx, ptr, - C.uint(len(session_id)))) == 0 { - return errorFromErrorQueue() - } - return nil -} - -// SetCipherList sets the list of available ciphers. The format of the list is -// described at http://www.openssl.org/docs/apps/ciphers.html, but see -// http://www.openssl.org/docs/ssl/SSL_CTX_set_cipher_list.html for more. -func (c *Ctx) SetCipherList(list string) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - clist := C.CString(list) - defer C.free(unsafe.Pointer(clist)) - if int(C.SSL_CTX_set_cipher_list(c.ctx, clist)) == 0 { - return errorFromErrorQueue() - } - return nil -} - -// SetNextProtos sets Negotiation protocol to the ctx. -func (c *Ctx) SetNextProtos(protos []string) error { - if len(protos) == 0 { - return nil - } - vector := make([]byte, 0) - for _, proto := range protos { - if len(proto) > 255 { - return fmt.Errorf( - "proto length can't be more than 255. But got a proto %s with length %d", - proto, len(proto)) - } - vector = append(vector, byte(uint8(len(proto)))) - vector = append(vector, []byte(proto)...) 
- } - ret := int(C.SSL_CTX_set_alpn_protos(c.ctx, (*C.uchar)(unsafe.Pointer(&vector[0])), - C.uint(len(vector)))) - if ret != 0 { - return errors.New("error while setting protos to ctx") - } - return nil -} - -type SessionCacheModes int - -const ( - SessionCacheOff SessionCacheModes = C.SSL_SESS_CACHE_OFF - SessionCacheClient SessionCacheModes = C.SSL_SESS_CACHE_CLIENT - SessionCacheServer SessionCacheModes = C.SSL_SESS_CACHE_SERVER - SessionCacheBoth SessionCacheModes = C.SSL_SESS_CACHE_BOTH - NoAutoClear SessionCacheModes = C.SSL_SESS_CACHE_NO_AUTO_CLEAR - NoInternalLookup SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP - NoInternalStore SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_STORE - NoInternal SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL -) - -// SetSessionCacheMode enables or disables session caching. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_session_cache_mode.html -func (c *Ctx) SetSessionCacheMode(modes SessionCacheModes) SessionCacheModes { - return SessionCacheModes( - C.X_SSL_CTX_set_session_cache_mode(c.ctx, C.long(modes))) -} - -// Set session cache timeout. Returns previously set value. -// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html -func (c *Ctx) SetTimeout(t time.Duration) time.Duration { - prev := C.X_SSL_CTX_set_timeout(c.ctx, C.long(t/time.Second)) - return time.Duration(prev) * time.Second -} - -// Get session cache timeout. -// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html -func (c *Ctx) GetTimeout() time.Duration { - return time.Duration(C.X_SSL_CTX_get_timeout(c.ctx)) * time.Second -} - -// Set session cache size. Returns previously set value. -// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html -func (c *Ctx) SessSetCacheSize(t int) int { - return int(C.X_SSL_CTX_sess_set_cache_size(c.ctx, C.long(t))) -} - -// Get session cache size. 
-// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html -func (c *Ctx) SessGetCacheSize() int { - return int(C.X_SSL_CTX_sess_get_cache_size(c.ctx)) -} diff --git a/vendor/github.com/libp2p/go-openssl/dh.go b/vendor/github.com/libp2p/go-openssl/dh.go deleted file mode 100644 index 75ac5ad4..00000000 --- a/vendor/github.com/libp2p/go-openssl/dh.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" -import ( - "errors" - "unsafe" -) - -// DeriveSharedSecret derives a shared secret using a private key and a peer's -// public key. -// The specific algorithm that is used depends on the types of the -// keys, but it is most commonly a variant of Diffie-Hellman. 
-func DeriveSharedSecret(private PrivateKey, public PublicKey) ([]byte, error) { - // Create context for the shared secret derivation - dhCtx := C.EVP_PKEY_CTX_new(private.evpPKey(), nil) - if dhCtx == nil { - return nil, errors.New("failed creating shared secret derivation context") - } - defer C.EVP_PKEY_CTX_free(dhCtx) - - // Initialize the context - if int(C.EVP_PKEY_derive_init(dhCtx)) != 1 { - return nil, errors.New("failed initializing shared secret derivation context") - } - - // Provide the peer's public key - if int(C.EVP_PKEY_derive_set_peer(dhCtx, public.evpPKey())) != 1 { - return nil, errors.New("failed adding peer public key to context") - } - - // Determine how large of a buffer we need for the shared secret - var buffLen C.size_t - if int(C.EVP_PKEY_derive(dhCtx, nil, &buffLen)) != 1 { - return nil, errors.New("failed determining shared secret length") - } - - // Allocate a buffer - buffer := C.X_OPENSSL_malloc(buffLen) - if buffer == nil { - return nil, errors.New("failed allocating buffer for shared secret") - } - defer C.X_OPENSSL_free(buffer) - - // Derive the shared secret - if int(C.EVP_PKEY_derive(dhCtx, (*C.uchar)(buffer), &buffLen)) != 1 { - return nil, errors.New("failed deriving the shared secret") - } - - secret := C.GoBytes(unsafe.Pointer(buffer), C.int(buffLen)) - return secret, nil -} diff --git a/vendor/github.com/libp2p/go-openssl/dhparam.go b/vendor/github.com/libp2p/go-openssl/dhparam.go deleted file mode 100644 index 294d0645..00000000 --- a/vendor/github.com/libp2p/go-openssl/dhparam.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "runtime" - "unsafe" -) - -type DH struct { - dh *C.struct_dh_st -} - -// LoadDHParametersFromPEM loads the Diffie-Hellman parameters from -// a PEM-encoded block. -func LoadDHParametersFromPEM(pem_block []byte) (*DH, error) { - if len(pem_block) == 0 { - return nil, errors.New("empty pem block") - } - bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), - C.int(len(pem_block))) - if bio == nil { - return nil, errors.New("failed creating bio") - } - defer C.BIO_free(bio) - - params := C.PEM_read_bio_DHparams(bio, nil, nil, nil) - if params == nil { - return nil, errors.New("failed reading dh parameters") - } - dhparams := &DH{dh: params} - runtime.SetFinalizer(dhparams, func(dhparams *DH) { - C.DH_free(dhparams.dh) - }) - return dhparams, nil -} - -// SetDHParameters sets the DH group (DH parameters) used to -// negotiate an emphemeral DH key during handshaking. -func (c *Ctx) SetDHParameters(dh *DH) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - if int(C.X_SSL_CTX_set_tmp_dh(c.ctx, dh.dh)) != 1 { - return errorFromErrorQueue() - } - return nil -} diff --git a/vendor/github.com/libp2p/go-openssl/digest.go b/vendor/github.com/libp2p/go-openssl/digest.go deleted file mode 100644 index 6d8d2635..00000000 --- a/vendor/github.com/libp2p/go-openssl/digest.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "fmt" - "unsafe" -) - -// Digest represents and openssl message digest. -type Digest struct { - ptr *C.EVP_MD -} - -// GetDigestByName returns the Digest with the name or nil and an error if the -// digest was not found. -func GetDigestByName(name string) (*Digest, error) { - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - p := C.X_EVP_get_digestbyname(cname) - if p == nil { - return nil, fmt.Errorf("Digest %v not found", name) - } - // we can consider digests to use static mem; don't need to free - return &Digest{ptr: p}, nil -} - -// GetDigestByName returns the Digest with the NID or nil and an error if the -// digest was not found. -func GetDigestByNid(nid NID) (*Digest, error) { - sn, err := Nid2ShortName(nid) - if err != nil { - return nil, err - } - return GetDigestByName(sn) -} diff --git a/vendor/github.com/libp2p/go-openssl/engine.go b/vendor/github.com/libp2p/go-openssl/engine.go deleted file mode 100644 index 78aef956..00000000 --- a/vendor/github.com/libp2p/go-openssl/engine.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -/* -#include "openssl/engine.h" -*/ -import "C" - -import ( - "fmt" - "runtime" - "unsafe" -) - -type Engine struct { - e *C.ENGINE -} - -func EngineById(name string) (*Engine, error) { - cname := C.CString(name) - defer C.free(unsafe.Pointer(cname)) - e := &Engine{ - e: C.ENGINE_by_id(cname), - } - if e.e == nil { - return nil, fmt.Errorf("engine %s missing", name) - } - if C.ENGINE_init(e.e) == 0 { - C.ENGINE_free(e.e) - return nil, fmt.Errorf("engine %s not initialized", name) - } - runtime.SetFinalizer(e, func(e *Engine) { - C.ENGINE_finish(e.e) - C.ENGINE_free(e.e) - }) - return e, nil -} diff --git a/vendor/github.com/libp2p/go-openssl/extension.c b/vendor/github.com/libp2p/go-openssl/extension.c deleted file mode 100644 index 99f1ca3d..00000000 --- a/vendor/github.com/libp2p/go-openssl/extension.c +++ /dev/null @@ -1,40 +0,0 @@ - - -#include -#include - -const unsigned char * get_extention(X509 *x, int NID, int *data_len){ - int loc; - ASN1_OCTET_STRING *octet_str; - long xlen; - int tag, xclass; - - loc = X509_get_ext_by_NID( x, NID, -1); - X509_EXTENSION *ex = X509_get_ext(x, loc); - octet_str = X509_EXTENSION_get_data(ex); - *data_len = octet_str->length; - return octet_str->data; -} - -// Copied from https://github.com/libtor/openssl/blob/master/demos/x509/mkcert.c#L153 -int add_custom_ext(X509 *cert, int nid,unsigned char *value, int len) -{ - X509_EXTENSION *ex; - ASN1_OCTET_STRING *os = ASN1_OCTET_STRING_new(); - ASN1_OCTET_STRING_set(os,value,len); - X509V3_CTX ctx; - /* This sets the 'context' of the 
extensions. */ - /* No configuration database */ - X509V3_set_ctx_nodb(&ctx); - /* Issuer and subject certs: both the target since it is self signed, - * no request and no CRL - */ - X509V3_set_ctx(&ctx, cert, cert, NULL, NULL, 0); - // ref http://openssl.6102.n7.nabble.com/Adding-a-custom-extension-to-a-CSR-td47446.html - ex = X509_EXTENSION_create_by_NID( NULL, nid, 0, os); - if (!X509_add_ext(cert,ex,-1)) - return 0; - - X509_EXTENSION_free(ex); - return 1; -} \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-openssl/fips.go b/vendor/github.com/libp2p/go-openssl/fips.go deleted file mode 100644 index b15b9bf0..00000000 --- a/vendor/github.com/libp2p/go-openssl/fips.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -/* -#include - -#if OPENSSL_VERSION_NUMBER >= 0x30000000L - int FIPS_mode_set(int ONOFF) { - return 0; - } -#endif - -*/ -import "C" -import "errors" -import "runtime" - -// FIPSModeSet enables a FIPS 140-2 validated mode of operation. -// https://wiki.openssl.org/index.php/FIPS_mode_set() -// This call has been deleted from OpenSSL 3.0. 
-func FIPSModeSet(mode bool) error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - if C.OPENSSL_VERSION_NUMBER >= 0x30000000 { - return errors.New("FIPS_mode_set() has been deleted from OpenSSL 3.0") - } - - var r C.int - if mode { - r = C.FIPS_mode_set(1) - } else { - r = C.FIPS_mode_set(0) - } - if r != 1 { - return errorFromErrorQueue() - } - return nil -} diff --git a/vendor/github.com/libp2p/go-openssl/hmac.go b/vendor/github.com/libp2p/go-openssl/hmac.go deleted file mode 100644 index 77e8dc58..00000000 --- a/vendor/github.com/libp2p/go-openssl/hmac.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "runtime" - "unsafe" -) - -type HMAC struct { - ctx *C.HMAC_CTX - engine *Engine - md *C.EVP_MD -} - -func NewHMAC(key []byte, digestAlgorithm EVP_MD) (*HMAC, error) { - return NewHMACWithEngine(key, digestAlgorithm, nil) -} - -func NewHMACWithEngine(key []byte, digestAlgorithm EVP_MD, e *Engine) (*HMAC, error) { - var md *C.EVP_MD = getDigestFunction(digestAlgorithm) - h := &HMAC{engine: e, md: md} - h.ctx = C.X_HMAC_CTX_new() - if h.ctx == nil { - return nil, errors.New("unable to allocate HMAC_CTX") - } - - var c_e *C.ENGINE - if e != nil { - c_e = e.e - } - if rc := C.X_HMAC_Init_ex(h.ctx, - unsafe.Pointer(&key[0]), - C.int(len(key)), - md, - c_e); rc != 1 { - C.X_HMAC_CTX_free(h.ctx) - return nil, errors.New("failed to initialize HMAC_CTX") - } - - runtime.SetFinalizer(h, func(h *HMAC) { h.Close() }) - return h, nil -} - -func (h *HMAC) Close() { - C.X_HMAC_CTX_free(h.ctx) -} - -func (h *HMAC) Write(data []byte) (n int, err error) { - if len(data) == 0 { - return 0, nil - } - if rc := C.X_HMAC_Update(h.ctx, (*C.uchar)(unsafe.Pointer(&data[0])), - C.size_t(len(data))); rc != 1 { - return 0, errors.New("failed to update HMAC") - } - return len(data), nil -} - -func (h *HMAC) Reset() error { - if C.X_HMAC_Init_ex(h.ctx, nil, 0, nil, nil) != 1 { - return errors.New("failed to reset HMAC_CTX") - } - return nil -} - -func (h *HMAC) Final() (result []byte, err error) { - mdLength := C.X_EVP_MD_size(h.md) - result = make([]byte, mdLength) - if rc := C.X_HMAC_Final(h.ctx, (*C.uchar)(unsafe.Pointer(&result[0])), - (*C.uint)(unsafe.Pointer(&mdLength))); rc != 1 { - return nil, errors.New("failed to finalized HMAC") - } - return result, h.Reset() -} diff --git a/vendor/github.com/libp2p/go-openssl/hostname.c b/vendor/github.com/libp2p/go-openssl/hostname.c deleted file mode 100644 index 0bffecad..00000000 --- a/vendor/github.com/libp2p/go-openssl/hostname.c +++ /dev/null @@ -1,373 +0,0 @@ -/* 
- * Go-OpenSSL notice: - * This file is required for all OpenSSL versions prior to 1.1.0. This simply - * provides the new 1.1.0 X509_check_* methods for hostname validation if they - * don't already exist. - */ - -#include - -#ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT - -/* portions from x509v3.h and v3_utl.c */ -/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL - * project. - */ -/* ==================================================================== - * Copyright (c) 1999-2003 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * licensing@OpenSSL.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. 
Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ -/* X509 v3 extension utilities */ - -#include -#include -#include -#include -#include - -#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1 -#define X509_CHECK_FLAG_NO_WILDCARDS 0x2 - -typedef int (*equal_fn)(const unsigned char *pattern, size_t pattern_len, - const unsigned char *subject, size_t subject_len); - -/* Compare while ASCII ignoring case. */ -static int equal_nocase(const unsigned char *pattern, size_t pattern_len, - const unsigned char *subject, size_t subject_len) - { - if (pattern_len != subject_len) - return 0; - while (pattern_len) - { - unsigned char l = *pattern; - unsigned char r = *subject; - /* The pattern must not contain NUL characters. 
*/ - if (l == 0) - return 0; - if (l != r) - { - if ('A' <= l && l <= 'Z') - l = (l - 'A') + 'a'; - if ('A' <= r && r <= 'Z') - r = (r - 'A') + 'a'; - if (l != r) - return 0; - } - ++pattern; - ++subject; - --pattern_len; - } - return 1; - } - -/* Compare using memcmp. */ -static int equal_case(const unsigned char *pattern, size_t pattern_len, - const unsigned char *subject, size_t subject_len) -{ - /* The pattern must not contain NUL characters. */ - if (memchr(pattern, '\0', pattern_len) != NULL) - return 0; - if (pattern_len != subject_len) - return 0; - return !memcmp(pattern, subject, pattern_len); -} - -/* RFC 5280, section 7.5, requires that only the domain is compared in - a case-insensitive manner. */ -static int equal_email(const unsigned char *a, size_t a_len, - const unsigned char *b, size_t b_len) - { - size_t i = a_len; - if (a_len != b_len) - return 0; - /* We search backwards for the '@' character, so that we do - not have to deal with quoted local-parts. The domain part - is compared in a case-insensitive manner. */ - while (i > 0) - { - --i; - if (a[i] == '@' || b[i] == '@') - { - if (!equal_nocase(a + i, a_len - i, - b + i, a_len - i)) - return 0; - break; - } - } - if (i == 0) - i = a_len; - return equal_case(a, i, b, i); - } - -/* Compare the prefix and suffix with the subject, and check that the - characters in-between are valid. 
*/ -static int wildcard_match(const unsigned char *prefix, size_t prefix_len, - const unsigned char *suffix, size_t suffix_len, - const unsigned char *subject, size_t subject_len) - { - const unsigned char *wildcard_start; - const unsigned char *wildcard_end; - const unsigned char *p; - if (subject_len < prefix_len + suffix_len) - return 0; - if (!equal_nocase(prefix, prefix_len, subject, prefix_len)) - return 0; - wildcard_start = subject + prefix_len; - wildcard_end = subject + (subject_len - suffix_len); - if (!equal_nocase(wildcard_end, suffix_len, suffix, suffix_len)) - return 0; - /* The wildcard must match at least one character. */ - if (wildcard_start == wildcard_end) - return 0; - /* Check that the part matched by the wildcard contains only - permitted characters and only matches a single label. */ - for (p = wildcard_start; p != wildcard_end; ++p) - if (!(('0' <= *p && *p <= '9') || - ('A' <= *p && *p <= 'Z') || - ('a' <= *p && *p <= 'z') || - *p == '-')) - return 0; - return 1; - } - -/* Checks if the memory region consistens of [0-9A-Za-z.-]. */ -static int valid_domain_characters(const unsigned char *p, size_t len) - { - while (len) - { - if (!(('0' <= *p && *p <= '9') || - ('A' <= *p && *p <= 'Z') || - ('a' <= *p && *p <= 'z') || - *p == '-' || *p == '.')) - return 0; - ++p; - --len; - } - return 1; - } - -/* Find the '*' in a wildcard pattern. If no such character is found - or the pattern is otherwise invalid, returns NULL. 
*/ -static const unsigned char *wildcard_find_star(const unsigned char *pattern, - size_t pattern_len) - { - const unsigned char *star = memchr(pattern, '*', pattern_len); - size_t dot_count = 0; - const unsigned char *suffix_start; - size_t suffix_length; - if (star == NULL) - return NULL; - suffix_start = star + 1; - suffix_length = (pattern + pattern_len) - (star + 1); - if (!(valid_domain_characters(pattern, star - pattern) && - valid_domain_characters(suffix_start, suffix_length))) - return NULL; - /* Check that the suffix matches at least two labels. */ - while (suffix_length) - { - if (*suffix_start == '.') - ++dot_count; - ++suffix_start; - --suffix_length; - } - if (dot_count < 2) - return NULL; - return star; - } - -/* Compare using wildcards. */ -static int equal_wildcard(const unsigned char *pattern, size_t pattern_len, - const unsigned char *subject, size_t subject_len) - { - const unsigned char *star = wildcard_find_star(pattern, pattern_len); - if (star == NULL) - return equal_nocase(pattern, pattern_len, - subject, subject_len); - return wildcard_match(pattern, star - pattern, - star + 1, (pattern + pattern_len) - star - 1, - subject, subject_len); - } - -/* Compare an ASN1_STRING to a supplied string. If they match - * return 1. If cmp_type > 0 only compare if string matches the - * type, otherwise convert it to UTF8. 
- */ - -static int do_check_string(ASN1_STRING *a, int cmp_type, equal_fn equal, - const unsigned char *b, size_t blen) - { - if (!a->data || !a->length) - return 0; - if (cmp_type > 0) - { - if (cmp_type != a->type) - return 0; - if (cmp_type == V_ASN1_IA5STRING) - return equal(a->data, a->length, b, blen); - if (a->length == (int)blen && !memcmp(a->data, b, blen)) - return 1; - else - return 0; - } - else - { - int astrlen, rv; - unsigned char *astr; - astrlen = ASN1_STRING_to_UTF8(&astr, a); - if (astrlen < 0) - return -1; - rv = equal(astr, astrlen, b, blen); - OPENSSL_free(astr); - return rv; - } - } - -static int do_x509_check(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags, int check_type) - { - STACK_OF(GENERAL_NAME) *gens = NULL; - X509_NAME *name = NULL; - int i; - int cnid; - int alt_type; - equal_fn equal; - if (check_type == GEN_EMAIL) - { - cnid = NID_pkcs9_emailAddress; - alt_type = V_ASN1_IA5STRING; - equal = equal_email; - } - else if (check_type == GEN_DNS) - { - cnid = NID_commonName; - alt_type = V_ASN1_IA5STRING; - if (flags & X509_CHECK_FLAG_NO_WILDCARDS) - equal = equal_nocase; - else - equal = equal_wildcard; - } - else - { - cnid = 0; - alt_type = V_ASN1_OCTET_STRING; - equal = equal_case; - } - - if (chklen == 0) - chklen = strlen((const char *)chk); - - gens = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL); - if (gens) - { - int rv = 0; - for (i = 0; i < sk_GENERAL_NAME_num(gens); i++) - { - GENERAL_NAME *gen; - ASN1_STRING *cstr; - gen = sk_GENERAL_NAME_value(gens, i); - if(gen->type != check_type) - continue; - if (check_type == GEN_EMAIL) - cstr = gen->d.rfc822Name; - else if (check_type == GEN_DNS) - cstr = gen->d.dNSName; - else - cstr = gen->d.iPAddress; - if (do_check_string(cstr, alt_type, equal, chk, chklen)) - { - rv = 1; - break; - } - } - GENERAL_NAMES_free(gens); - if (rv) - return 1; - if (!(flags & X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT) || !cnid) - return 0; - } - i = -1; - name = 
X509_get_subject_name(x); - while((i = X509_NAME_get_index_by_NID(name, cnid, i)) >= 0) - { - X509_NAME_ENTRY *ne; - ASN1_STRING *str; - ne = X509_NAME_get_entry(name, i); - str = X509_NAME_ENTRY_get_data(ne); - if (do_check_string(str, -1, equal, chk, chklen)) - return 1; - } - return 0; - } - -#if OPENSSL_VERSION_NUMBER < 0x1000200fL - -int X509_check_host(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags, char **peername) - { - return do_x509_check(x, chk, chklen, flags, GEN_DNS); - } - -int X509_check_email(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags) - { - return do_x509_check(x, chk, chklen, flags, GEN_EMAIL); - } - -int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags) - { - return do_x509_check(x, chk, chklen, flags, GEN_IPADD); - } - -#endif /* OPENSSL_VERSION_NUMBER < 0x1000200fL */ - -#endif diff --git a/vendor/github.com/libp2p/go-openssl/hostname.go b/vendor/github.com/libp2p/go-openssl/hostname.go deleted file mode 100644 index 9ef4ba29..00000000 --- a/vendor/github.com/libp2p/go-openssl/hostname.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -/* -#include -#include -#if OPENSSL_VERSION_NUMBER >= 0x30000000L - #include - typedef const char x509char; -#else - #include - - #ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT - #define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1 - #define X509_CHECK_FLAG_NO_WILDCARDS 0x2 - - extern int X509_check_host(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags, char **peername); - extern int X509_check_email(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags); - extern int X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen, - unsigned int flags); - typedef const unsigned char x509char; - #else - typedef const char x509char; - #endif -#endif -*/ -import "C" - -import ( - "errors" - "net" - "unsafe" -) - -var ( - ValidationError = errors.New("host validation error") //lint:ignore ST1012 rename may cause breaking changes; research before renaming. -) - -type CheckFlags int - -const ( - AlwaysCheckSubject CheckFlags = C.X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT - NoWildcards CheckFlags = C.X509_CHECK_FLAG_NO_WILDCARDS -) - -// CheckHost checks that the X509 certificate is signed for the provided -// host name. See http://www.openssl.org/docs/crypto/X509_check_host.html for -// more. Note that CheckHost does not check the IP field. See VerifyHostname. -// Specifically returns ValidationError if the Certificate didn't match but -// there was no internal error. -func (c *Certificate) CheckHost(host string, flags CheckFlags) error { - chost := unsafe.Pointer(C.CString(host)) - defer C.free(chost) - - rv := C.X509_check_host(c.x, (*C.x509char)(chost), C.size_t(len(host)), - C.uint(flags), nil) - if rv > 0 { - return nil - } - if rv == 0 { - return ValidationError - } - return errors.New("hostname validation had an internal failure") -} - -// CheckEmail checks that the X509 certificate is signed for the provided -// email address. See http://www.openssl.org/docs/crypto/X509_check_host.html -// for more. 
-// Specifically returns ValidationError if the Certificate didn't match but -// there was no internal error. -func (c *Certificate) CheckEmail(email string, flags CheckFlags) error { - cemail := unsafe.Pointer(C.CString(email)) - defer C.free(cemail) - rv := C.X509_check_email(c.x, (*C.x509char)(cemail), C.size_t(len(email)), - C.uint(flags)) - if rv > 0 { - return nil - } - if rv == 0 { - return ValidationError - } - return errors.New("email validation had an internal failure") -} - -// CheckIP checks that the X509 certificate is signed for the provided -// IP address. See http://www.openssl.org/docs/crypto/X509_check_host.html -// for more. -// Specifically returns ValidationError if the Certificate didn't match but -// there was no internal error. -func (c *Certificate) CheckIP(ip net.IP, flags CheckFlags) error { - // X509_check_ip will fail to validate the 16-byte representation of an IPv4 - // address, so convert to the 4-byte representation. - if ip4 := ip.To4(); ip4 != nil { - ip = ip4 - } - - cip := unsafe.Pointer(&ip[0]) - rv := C.X509_check_ip(c.x, (*C.uchar)(cip), C.size_t(len(ip)), - C.uint(flags)) - if rv > 0 { - return nil - } - if rv == 0 { - return ValidationError - } - return errors.New("ip validation had an internal failure") -} - -// VerifyHostname is a combination of CheckHost and CheckIP. If the provided -// hostname looks like an IP address, it will be checked as an IP address, -// otherwise it will be checked as a hostname. -// Specifically returns ValidationError if the Certificate didn't match but -// there was no internal error. 
-func (c *Certificate) VerifyHostname(host string) error { - var ip net.IP - if len(host) >= 3 && host[0] == '[' && host[len(host)-1] == ']' { - ip = net.ParseIP(host[1 : len(host)-1]) - } else { - ip = net.ParseIP(host) - } - if ip != nil { - return c.CheckIP(ip, 0) - } - return c.CheckHost(host, 0) -} diff --git a/vendor/github.com/libp2p/go-openssl/http.go b/vendor/github.com/libp2p/go-openssl/http.go deleted file mode 100644 index 39bd5a28..00000000 --- a/vendor/github.com/libp2p/go-openssl/http.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -import ( - "net/http" -) - -// ListenAndServeTLS will take an http.Handler and serve it using OpenSSL over -// the given tcp address, configured to use the provided cert and key files. -func ListenAndServeTLS(addr string, cert_file string, key_file string, - handler http.Handler) error { - return ServerListenAndServeTLS( - &http.Server{Addr: addr, Handler: handler}, cert_file, key_file) -} - -// ServerListenAndServeTLS will take an http.Server and serve it using OpenSSL -// configured to use the provided cert and key files. 
-func ServerListenAndServeTLS(srv *http.Server, - cert_file, key_file string) error { - addr := srv.Addr - if addr == "" { - addr = ":https" - } - - ctx, err := NewCtxFromFiles(cert_file, key_file) - if err != nil { - return err - } - - l, err := Listen("tcp", addr, ctx) - if err != nil { - return err - } - - return srv.Serve(l) -} - -// TODO: http client integration -// holy crap, getting this integrated nicely with the Go stdlib HTTP client -// stack so that it does proxying, connection pooling, and most importantly -// hostname verification is really hard. So much stuff is hardcoded to just use -// the built-in TLS lib. I think to get this to work either some crazy -// hacktackery beyond me, an almost straight up fork of the HTTP client, or -// serious stdlib internal refactoring is necessary. -// even more so, good luck getting openssl to use the operating system default -// root certificates if the user doesn't provide any. sadlol -// NOTE: if you're going to try and write your own round tripper, at least use -// openssl.Dial, or equivalent logic diff --git a/vendor/github.com/libp2p/go-openssl/init.go b/vendor/github.com/libp2p/go-openssl/init.go deleted file mode 100644 index 107adee1..00000000 --- a/vendor/github.com/libp2p/go-openssl/init.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package openssl is a light wrapper around OpenSSL for Go. 
- -It strives to provide a near-drop-in replacement for the Go standard library -tls package, while allowing for: - -Performance - -OpenSSL is battle-tested and optimized C. While Go's built-in library shows -great promise, it is still young and in some places, inefficient. This simple -OpenSSL wrapper can often do at least 2x with the same cipher and protocol. - -On my lappytop, I get the following benchmarking speeds: - BenchmarkSHA1Large_openssl 1000 2611282 ns/op 401.56 MB/s - BenchmarkSHA1Large_stdlib 500 3963983 ns/op 264.53 MB/s - BenchmarkSHA1Small_openssl 1000000 3476 ns/op 0.29 MB/s - BenchmarkSHA1Small_stdlib 5000000 550 ns/op 1.82 MB/s - BenchmarkSHA256Large_openssl 200 8085314 ns/op 129.69 MB/s - BenchmarkSHA256Large_stdlib 100 18948189 ns/op 55.34 MB/s - BenchmarkSHA256Small_openssl 1000000 4262 ns/op 0.23 MB/s - BenchmarkSHA256Small_stdlib 1000000 1444 ns/op 0.69 MB/s - BenchmarkOpenSSLThroughput 100000 21634 ns/op 47.33 MB/s - BenchmarkStdlibThroughput 50000 58974 ns/op 17.36 MB/s - -Interoperability - -Many systems support OpenSSL with a variety of plugins and modules for things, -such as hardware acceleration in embedded devices. - -Greater flexibility and configuration - -OpenSSL allows for far greater configuration of corner cases and backwards -compatibility (such as support of SSLv2). You shouldn't be using SSLv2 if you -can help but, but sometimes you can't help it. - -Security - -Yeah yeah, Heartbleed. But according to the author of the standard library's -TLS implementation, Go's TLS library is vulnerable to timing attacks. And -whether or not OpenSSL received the appropriate amount of scrutiny -pre-Heartbleed, it sure is receiving it now. - -Usage - -Starting an HTTP server that uses OpenSSL is very easy. 
It's as simple as: - log.Fatal(openssl.ListenAndServeTLS( - ":8443", "my_server.crt", "my_server.key", myHandler)) - -Getting a net.Listener that uses OpenSSL is also easy: - ctx, err := openssl.NewCtxFromFiles("my_server.crt", "my_server.key") - if err != nil { - log.Fatal(err) - } - l, err := openssl.Listen("tcp", ":7777", ctx) - -Making a client connection is straightforward too: - ctx, err := NewCtx() - if err != nil { - log.Fatal(err) - } - err = ctx.LoadVerifyLocations("/etc/ssl/certs/ca-certificates.crt", "") - if err != nil { - log.Fatal(err) - } - conn, err := openssl.Dial("tcp", "localhost:7777", ctx, 0) - -Help wanted: To get this library to work with net/http's client, we -had to fork net/http. It would be nice if an alternate http client library -supported the generality needed to use OpenSSL instead of crypto/tls. -*/ -package openssl - -// #include "shim.h" -import "C" - -import ( - "fmt" - "strings" -) - -func init() { - if rc := C.X_shim_init(); rc != 0 { - panic(fmt.Errorf("x_shim_init failed with %d", rc)) - } -} - -// errorFromErrorQueue needs to run in the same OS thread as the operation -// that caused the possible error -func errorFromErrorQueue() error { - var errs []string - for { - err := C.ERR_get_error() - if err == 0 { - break - } - errs = append(errs, fmt.Sprintf("%s:%s:%s", - C.GoString(C.ERR_lib_error_string(err)), - C.GoString(C.ERR_func_error_string(err)), - C.GoString(C.ERR_reason_error_string(err)))) - } - return fmt.Errorf("SSL errors: %s", strings.Join(errs, "\n")) -} diff --git a/vendor/github.com/libp2p/go-openssl/init_posix.go b/vendor/github.com/libp2p/go-openssl/init_posix.go deleted file mode 100644 index f518d2f8..00000000 --- a/vendor/github.com/libp2p/go-openssl/init_posix.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (linux || darwin || solaris || freebsd || openbsd) && !windows -// +build linux darwin solaris freebsd openbsd -// +build !windows - -package openssl - -/* -#include -#include -#include - -pthread_mutex_t* goopenssl_locks; - -int go_init_locks() { - int rc = 0; - int nlock; - int i; - int locks_needed = CRYPTO_num_locks(); - - goopenssl_locks = (pthread_mutex_t*)malloc( - sizeof(pthread_mutex_t) * locks_needed); - if (!goopenssl_locks) { - return ENOMEM; - } - for (nlock = 0; nlock < locks_needed; ++nlock) { - rc = pthread_mutex_init(&goopenssl_locks[nlock], NULL); - if (rc != 0) { - break; - } - } - - if (rc != 0) { - for (i = nlock - 1; i >= 0; --i) { - pthread_mutex_destroy(&goopenssl_locks[i]); - } - free(goopenssl_locks); - goopenssl_locks = NULL; - } - return rc; -} - -void go_thread_locking_callback(int mode, int n, const char *file, - int line) { - if (mode & CRYPTO_LOCK) { - pthread_mutex_lock(&goopenssl_locks[n]); - } else { - pthread_mutex_unlock(&goopenssl_locks[n]); - } -} - -unsigned long go_thread_id_callback(void) { - return (unsigned long)pthread_self(); -} -*/ -import "C" diff --git a/vendor/github.com/libp2p/go-openssl/init_windows.go b/vendor/github.com/libp2p/go-openssl/init_windows.go deleted file mode 100644 index 7356b6e2..00000000 --- a/vendor/github.com/libp2p/go-openssl/init_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build windows -// +build windows - -package openssl - -/* -#include -#include -#include - -CRITICAL_SECTION* goopenssl_locks; - -int go_init_locks() { - int rc = 0; - int nlock; - int i; - int locks_needed = CRYPTO_num_locks(); - - goopenssl_locks = (CRITICAL_SECTION*)malloc( - sizeof(*goopenssl_locks) * locks_needed); - if (!goopenssl_locks) { - return ENOMEM; - } - for (nlock = 0; nlock < locks_needed; ++nlock) { - InitializeCriticalSection(&goopenssl_locks[nlock]); - } - - return 0; -} - -void go_thread_locking_callback(int mode, int n, const char *file, - int line) { - if (mode & CRYPTO_LOCK) { - EnterCriticalSection(&goopenssl_locks[n]); - } else { - LeaveCriticalSection(&goopenssl_locks[n]); - } -} - -unsigned long go_thread_id_callback(void) { - return (unsigned long)GetCurrentThreadId(); -} -*/ -import "C" diff --git a/vendor/github.com/libp2p/go-openssl/key.go b/vendor/github.com/libp2p/go-openssl/key.go deleted file mode 100644 index 25be635b..00000000 --- a/vendor/github.com/libp2p/go-openssl/key.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "io/ioutil" - "runtime" - "unsafe" -) - -var ( // some (effectively) constants for tests to refer to - ed25519_support = C.X_ED25519_SUPPORT != 0 -) - -type Method *C.EVP_MD - -var ( - SHA1_Method Method = C.X_EVP_sha1() - SHA256_Method Method = C.X_EVP_sha256() - SHA512_Method Method = C.X_EVP_sha512() -) - -// Constants for the various key types. -// Mapping of name -> NID taken from openssl/evp.h -const ( - KeyTypeNone = NID_undef - KeyTypeRSA = NID_rsaEncryption - KeyTypeRSA2 = NID_rsa - KeyTypeDSA = NID_dsa - KeyTypeDSA1 = NID_dsa_2 - KeyTypeDSA2 = NID_dsaWithSHA - KeyTypeDSA3 = NID_dsaWithSHA1 - KeyTypeDSA4 = NID_dsaWithSHA1_2 - KeyTypeDH = NID_dhKeyAgreement - KeyTypeDHX = NID_dhpublicnumber - KeyTypeEC = NID_X9_62_id_ecPublicKey - KeyTypeHMAC = NID_hmac - KeyTypeCMAC = NID_cmac - KeyTypeTLS1PRF = NID_tls1_prf - KeyTypeHKDF = NID_hkdf - KeyTypeX25519 = NID_X25519 - KeyTypeX448 = NID_X448 - KeyTypeED25519 = NID_ED25519 - KeyTypeED448 = NID_ED448 -) - -type PublicKey interface { - // Verifies the data signature using PKCS1.15 - VerifyPKCS1v15(method Method, data, sig []byte) error - - // MarshalPKIXPublicKeyPEM converts the public key to PEM-encoded PKIX - // format - MarshalPKIXPublicKeyPEM() (pem_block []byte, err error) - - // MarshalPKIXPublicKeyDER converts the public key to DER-encoded PKIX - // format - MarshalPKIXPublicKeyDER() (der_block []byte, err error) - - // KeyType returns an identifier for what kind of key is represented by this - // object. - KeyType() NID - - // BaseType returns an identifier for what kind of key is represented - // by this object. - // Keys that share same algorithm but use different legacy formats - // will have the same BaseType. 
- // - // For example, a key with a `KeyType() == KeyTypeRSA` and a key with a - // `KeyType() == KeyTypeRSA2` would both have `BaseType() == KeyTypeRSA`. - BaseType() NID - - // Equal compares the key with the passed in key. - Equal(key PublicKey) bool - - // Size returns the size (in bytes) of signatures created with this key. - Size() int - - evpPKey() *C.EVP_PKEY -} - -type PrivateKey interface { - PublicKey - - // Signs the data using PKCS1.15 - SignPKCS1v15(Method, []byte) ([]byte, error) - - // MarshalPKCS1PrivateKeyPEM converts the private key to PEM-encoded PKCS1 - // format - MarshalPKCS1PrivateKeyPEM() (pem_block []byte, err error) - - // MarshalPKCS1PrivateKeyDER converts the private key to DER-encoded PKCS1 - // format - MarshalPKCS1PrivateKeyDER() (der_block []byte, err error) -} - -type pKey struct { - key *C.EVP_PKEY -} - -func (key *pKey) evpPKey() *C.EVP_PKEY { return key.key } - -func (key *pKey) Equal(other PublicKey) bool { - return C.EVP_PKEY_cmp(key.key, other.evpPKey()) == 1 -} - -func (key *pKey) KeyType() NID { - return NID(C.EVP_PKEY_id(key.key)) -} - -func (key *pKey) Size() int { - return int(C.EVP_PKEY_size(key.key)) -} - -func (key *pKey) BaseType() NID { - return NID(C.EVP_PKEY_base_id(key.key)) -} - -func (key *pKey) SignPKCS1v15(method Method, data []byte) ([]byte, error) { - - ctx := C.X_EVP_MD_CTX_new() - defer C.X_EVP_MD_CTX_free(ctx) - - if key.KeyType() == KeyTypeED25519 { - // do ED specific one-shot sign - - if method != nil || len(data) == 0 { - return nil, errors.New("signpkcs1v15: 0-length data or non-null digest") - } - - if C.X_EVP_DigestSignInit(ctx, nil, nil, nil, key.key) != 1 { - return nil, errors.New("signpkcs1v15: failed to init signature") - } - - // evp signatures are 64 bytes - sig := make([]byte, 64) - var sigblen C.size_t = 64 - if C.X_EVP_DigestSign(ctx, - (*C.uchar)(unsafe.Pointer(&sig[0])), - &sigblen, - (*C.uchar)(unsafe.Pointer(&data[0])), - C.size_t(len(data))) != 1 { - return nil, 
errors.New("signpkcs1v15: failed to do one-shot signature") - } - - return sig[:sigblen], nil - } else { - if C.X_EVP_SignInit(ctx, method) != 1 { - return nil, errors.New("signpkcs1v15: failed to init signature") - } - if len(data) > 0 { - if C.X_EVP_SignUpdate( - ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) != 1 { - return nil, errors.New("signpkcs1v15: failed to update signature") - } - } - sig := make([]byte, C.X_EVP_PKEY_size(key.key)) - var sigblen C.uint - if C.X_EVP_SignFinal(ctx, - (*C.uchar)(unsafe.Pointer(&sig[0])), &sigblen, key.key) != 1 { - return nil, errors.New("signpkcs1v15: failed to finalize signature") - } - return sig[:sigblen], nil - } -} - -func (key *pKey) VerifyPKCS1v15(method Method, data, sig []byte) error { - ctx := C.X_EVP_MD_CTX_new() - defer C.X_EVP_MD_CTX_free(ctx) - - if len(sig) == 0 { - return errors.New("verifypkcs1v15: 0-length sig") - } - - if key.KeyType() == KeyTypeED25519 { - // do ED specific one-shot sign - - if method != nil || len(data) == 0 { - return errors.New("verifypkcs1v15: 0-length data or non-null digest") - } - - if C.X_EVP_DigestVerifyInit(ctx, nil, nil, nil, key.key) != 1 { - return errors.New("verifypkcs1v15: failed to init verify") - } - - if C.X_EVP_DigestVerify(ctx, - (*C.uchar)(unsafe.Pointer(&sig[0])), - C.size_t(len(sig)), - (*C.uchar)(unsafe.Pointer(&data[0])), - C.size_t(len(data))) != 1 { - return errors.New("verifypkcs1v15: failed to do one-shot verify") - } - - return nil - - } else { - if C.X_EVP_VerifyInit(ctx, method) != 1 { - return errors.New("verifypkcs1v15: failed to init verify") - } - if len(data) > 0 { - if C.X_EVP_VerifyUpdate( - ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) != 1 { - return errors.New("verifypkcs1v15: failed to update verify") - } - } - if C.X_EVP_VerifyFinal(ctx, - (*C.uchar)(unsafe.Pointer(&sig[0])), C.uint(len(sig)), key.key) != 1 { - return errors.New("verifypkcs1v15: failed to finalize verify") - } - return nil - } -} - -func (key *pKey) 
MarshalPKCS1PrivateKeyPEM() (pem_block []byte, - err error) { - bio := C.BIO_new(C.BIO_s_mem()) - if bio == nil { - return nil, errors.New("failed to allocate memory BIO") - } - defer C.BIO_free(bio) - - // PEM_write_bio_PrivateKey_traditional will use the key-specific PKCS1 - // format if one is available for that key type, otherwise it will encode - // to a PKCS8 key. - if int(C.X_PEM_write_bio_PrivateKey_traditional(bio, key.key, nil, nil, - C.int(0), nil, nil)) != 1 { - return nil, errors.New("failed dumping private key") - } - - return ioutil.ReadAll(asAnyBio(bio)) -} - -func (key *pKey) MarshalPKCS1PrivateKeyDER() (der_block []byte, - err error) { - bio := C.BIO_new(C.BIO_s_mem()) - if bio == nil { - return nil, errors.New("failed to allocate memory BIO") - } - defer C.BIO_free(bio) - - if int(C.i2d_PrivateKey_bio(bio, key.key)) != 1 { - return nil, errors.New("failed dumping private key der") - } - - return ioutil.ReadAll(asAnyBio(bio)) -} - -func (key *pKey) MarshalPKIXPublicKeyPEM() (pem_block []byte, - err error) { - bio := C.BIO_new(C.BIO_s_mem()) - if bio == nil { - return nil, errors.New("failed to allocate memory BIO") - } - defer C.BIO_free(bio) - - if int(C.PEM_write_bio_PUBKEY(bio, key.key)) != 1 { - return nil, errors.New("failed dumping public key pem") - } - - return ioutil.ReadAll(asAnyBio(bio)) -} - -func (key *pKey) MarshalPKIXPublicKeyDER() (der_block []byte, - err error) { - bio := C.BIO_new(C.BIO_s_mem()) - if bio == nil { - return nil, errors.New("failed to allocate memory BIO") - } - defer C.BIO_free(bio) - - if int(C.i2d_PUBKEY_bio(bio, key.key)) != 1 { - return nil, errors.New("failed dumping public key der") - } - - return ioutil.ReadAll(asAnyBio(bio)) -} - -// LoadPrivateKeyFromPEM loads a private key from a PEM-encoded block. 
-func LoadPrivateKeyFromPEM(pem_block []byte) (PrivateKey, error) { - if len(pem_block) == 0 { - return nil, errors.New("empty pem block") - } - bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), - C.int(len(pem_block))) - if bio == nil { - return nil, errors.New("failed creating bio") - } - defer C.BIO_free(bio) - - key := C.PEM_read_bio_PrivateKey(bio, nil, nil, nil) - if key == nil { - return nil, errors.New("failed reading private key") - } - - p := &pKey{key: key} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// LoadPrivateKeyFromPEMWithPassword loads a private key from a PEM-encoded block. -func LoadPrivateKeyFromPEMWithPassword(pem_block []byte, password string) ( - PrivateKey, error) { - if len(pem_block) == 0 { - return nil, errors.New("empty pem block") - } - bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), - C.int(len(pem_block))) - if bio == nil { - return nil, errors.New("failed creating bio") - } - defer C.BIO_free(bio) - cs := C.CString(password) - defer C.free(unsafe.Pointer(cs)) - key := C.PEM_read_bio_PrivateKey(bio, nil, nil, unsafe.Pointer(cs)) - if key == nil { - return nil, errors.New("failed reading private key") - } - - p := &pKey{key: key} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// LoadPrivateKeyFromDER loads a private key from a DER-encoded block. 
-func LoadPrivateKeyFromDER(der_block []byte) (PrivateKey, error) { - if len(der_block) == 0 { - return nil, errors.New("empty der block") - } - bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]), - C.int(len(der_block))) - if bio == nil { - return nil, errors.New("failed creating bio") - } - defer C.BIO_free(bio) - - key := C.d2i_PrivateKey_bio(bio, nil) - if key == nil { - return nil, errors.New("failed reading private key der") - } - - p := &pKey{key: key} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// LoadPrivateKeyFromPEMWidthPassword loads a private key from a PEM-encoded block. -// Backwards-compatible with typo -func LoadPrivateKeyFromPEMWidthPassword(pem_block []byte, password string) ( - PrivateKey, error) { - return LoadPrivateKeyFromPEMWithPassword(pem_block, password) -} - -// LoadPublicKeyFromPEM loads a public key from a PEM-encoded block. -func LoadPublicKeyFromPEM(pem_block []byte) (PublicKey, error) { - if len(pem_block) == 0 { - return nil, errors.New("empty pem block") - } - bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]), - C.int(len(pem_block))) - if bio == nil { - return nil, errors.New("failed creating bio") - } - defer C.BIO_free(bio) - - key := C.PEM_read_bio_PUBKEY(bio, nil, nil, nil) - if key == nil { - return nil, errors.New("failed reading public key der") - } - - p := &pKey{key: key} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// LoadPublicKeyFromDER loads a public key from a DER-encoded block. 
-func LoadPublicKeyFromDER(der_block []byte) (PublicKey, error) { - if len(der_block) == 0 { - return nil, errors.New("empty der block") - } - bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]), - C.int(len(der_block))) - if bio == nil { - return nil, errors.New("failed creating bio") - } - defer C.BIO_free(bio) - - key := C.d2i_PUBKEY_bio(bio, nil) - if key == nil { - return nil, errors.New("failed reading public key der") - } - - p := &pKey{key: key} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// GenerateRSAKey generates a new RSA private key with an exponent of 3. -func GenerateRSAKey(bits int) (PrivateKey, error) { - return GenerateRSAKeyWithExponent(bits, 3) -} - -// GenerateRSAKeyWithExponent generates a new RSA private key. -func GenerateRSAKeyWithExponent(bits int, exponent int) (PrivateKey, error) { - rsa := C.RSA_generate_key(C.int(bits), C.ulong(exponent), nil, nil) - if rsa == nil { - return nil, errors.New("failed to generate RSA key") - } - key := C.X_EVP_PKEY_new() - if key == nil { - return nil, errors.New("failed to allocate EVP_PKEY") - } - if C.X_EVP_PKEY_assign_charp(key, C.EVP_PKEY_RSA, (*C.char)(unsafe.Pointer(rsa))) != 1 { - C.X_EVP_PKEY_free(key) - return nil, errors.New("failed to assign RSA key") - } - p := &pKey{key: key} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// GenerateECKey generates a new elliptic curve private key on the speicified -// curve. 
-func GenerateECKey(curve EllipticCurve) (PrivateKey, error) { - - // Create context for parameter generation - paramCtx := C.EVP_PKEY_CTX_new_id(C.EVP_PKEY_EC, nil) - if paramCtx == nil { - return nil, errors.New("failed creating EC parameter generation context") - } - defer C.EVP_PKEY_CTX_free(paramCtx) - - // Intialize the parameter generation - if int(C.EVP_PKEY_paramgen_init(paramCtx)) != 1 { - return nil, errors.New("failed initializing EC parameter generation context") - } - - // Set curve in EC parameter generation context - if int(C.X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(paramCtx, C.int(curve))) != 1 { - return nil, errors.New("failed setting curve in EC parameter generation context") - } - - // Create parameter object - var params *C.EVP_PKEY - if int(C.EVP_PKEY_paramgen(paramCtx, ¶ms)) != 1 { - return nil, errors.New("failed creating EC key generation parameters") - } - defer C.EVP_PKEY_free(params) - - // Create context for the key generation - keyCtx := C.EVP_PKEY_CTX_new(params, nil) - if keyCtx == nil { - return nil, errors.New("failed creating EC key generation context") - } - defer C.EVP_PKEY_CTX_free(keyCtx) - - // Generate the key - var privKey *C.EVP_PKEY - if int(C.EVP_PKEY_keygen_init(keyCtx)) != 1 { - return nil, errors.New("failed initializing EC key generation context") - } - if int(C.EVP_PKEY_keygen(keyCtx, &privKey)) != 1 { - return nil, errors.New("failed generating EC private key") - } - - p := &pKey{key: privKey} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} - -// GenerateED25519Key generates a Ed25519 key -func GenerateED25519Key() (PrivateKey, error) { - // Key context - keyCtx := C.EVP_PKEY_CTX_new_id(C.X_EVP_PKEY_ED25519, nil) - if keyCtx == nil { - return nil, errors.New("failed creating EC parameter generation context") - } - defer C.EVP_PKEY_CTX_free(keyCtx) - - // Generate the key - var privKey *C.EVP_PKEY - if int(C.EVP_PKEY_keygen_init(keyCtx)) != 1 { - return nil, 
errors.New("failed initializing ED25519 key generation context") - } - if int(C.EVP_PKEY_keygen(keyCtx, &privKey)) != 1 { - return nil, errors.New("failed generating ED25519 private key") - } - - p := &pKey{key: privKey} - runtime.SetFinalizer(p, func(p *pKey) { - C.X_EVP_PKEY_free(p.key) - }) - return p, nil -} diff --git a/vendor/github.com/libp2p/go-openssl/mapping.go b/vendor/github.com/libp2p/go-openssl/mapping.go deleted file mode 100644 index d78cc703..00000000 --- a/vendor/github.com/libp2p/go-openssl/mapping.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -import ( - "sync" - "unsafe" -) - -// #include -import "C" - -type mapping struct { - lock sync.Mutex - values map[token]unsafe.Pointer -} - -func newMapping() *mapping { - return &mapping{ - values: make(map[token]unsafe.Pointer), - } -} - -type token unsafe.Pointer - -func (m *mapping) Add(x unsafe.Pointer) token { - res := token(C.malloc(1)) - - m.lock.Lock() - m.values[res] = x - m.lock.Unlock() - - return res -} - -func (m *mapping) Get(x token) unsafe.Pointer { - m.lock.Lock() - res := m.values[x] - m.lock.Unlock() - - return res -} - -func (m *mapping) Del(x token) { - m.lock.Lock() - delete(m.values, x) - m.lock.Unlock() - - C.free(unsafe.Pointer(x)) -} diff --git a/vendor/github.com/libp2p/go-openssl/md4.go b/vendor/github.com/libp2p/go-openssl/md4.go deleted file mode 100644 index 95d9d2d2..00000000 --- a/vendor/github.com/libp2p/go-openssl/md4.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "runtime" - "unsafe" -) - -type MD4Hash struct { - ctx *C.EVP_MD_CTX - engine *Engine -} - -func NewMD4Hash() (*MD4Hash, error) { return NewMD4HashWithEngine(nil) } - -func NewMD4HashWithEngine(e *Engine) (*MD4Hash, error) { - hash := &MD4Hash{engine: e} - hash.ctx = C.X_EVP_MD_CTX_new() - if hash.ctx == nil { - return nil, errors.New("openssl: md4: unable to allocate ctx") - } - runtime.SetFinalizer(hash, func(hash *MD4Hash) { hash.Close() }) - if err := hash.Reset(); err != nil { - return nil, err - } - return hash, nil -} - -func (s *MD4Hash) Close() { - if s.ctx != nil { - C.X_EVP_MD_CTX_free(s.ctx) - s.ctx = nil - } -} - -func (s *MD4Hash) Reset() error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_md4(), engineRef(s.engine)) != 1 { - return errors.New("openssl: md4: cannot init digest ctx: " + - errorFromErrorQueue().Error()) - } - return nil -} - -func (s *MD4Hash) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), - C.size_t(len(p))) != 1 { - return 0, errors.New("openssl: md4: cannot update digest") - } - return len(p), nil -} - -func (s *MD4Hash) Sum() (result [16]byte, err error) { - if C.X_EVP_DigestFinal_ex(s.ctx, - (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 { - return result, errors.New("openssl: md4: cannot finalize ctx") - } - return result, s.Reset() -} - -func MD4(data []byte) (result [16]byte, err error) { - hash, err := NewMD4Hash() - if err != nil { - return result, err - } - defer hash.Close() - if _, err := hash.Write(data); err != nil { - return result, err - } - return hash.Sum() -} diff --git a/vendor/github.com/libp2p/go-openssl/md5.go b/vendor/github.com/libp2p/go-openssl/md5.go deleted file mode 100644 index d7e771ee..00000000 --- a/vendor/github.com/libp2p/go-openssl/md5.go +++ /dev/null @@ -1,89 +0,0 @@ -// 
Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "runtime" - "unsafe" -) - -type MD5Hash struct { - ctx *C.EVP_MD_CTX - engine *Engine -} - -func NewMD5Hash() (*MD5Hash, error) { return NewMD5HashWithEngine(nil) } - -func NewMD5HashWithEngine(e *Engine) (*MD5Hash, error) { - hash := &MD5Hash{engine: e} - hash.ctx = C.X_EVP_MD_CTX_new() - if hash.ctx == nil { - return nil, errors.New("openssl: md5: unable to allocate ctx") - } - runtime.SetFinalizer(hash, func(hash *MD5Hash) { hash.Close() }) - if err := hash.Reset(); err != nil { - return nil, err - } - return hash, nil -} - -func (s *MD5Hash) Close() { - if s.ctx != nil { - C.X_EVP_MD_CTX_free(s.ctx) - s.ctx = nil - } -} - -func (s *MD5Hash) Reset() error { - if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_md5(), engineRef(s.engine)) != 1 { - return errors.New("openssl: md5: cannot init digest ctx") - } - return nil -} - -func (s *MD5Hash) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), - C.size_t(len(p))) != 1 { - return 0, errors.New("openssl: md5: cannot update digest") - } - return len(p), nil -} - -func (s *MD5Hash) Sum() (result [16]byte, err error) { - if C.X_EVP_DigestFinal_ex(s.ctx, - (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 { - return result, errors.New("openssl: md5: cannot finalize ctx") - } - return 
result, s.Reset() -} - -func MD5(data []byte) (result [16]byte, err error) { - hash, err := NewMD5Hash() - if err != nil { - return result, err - } - defer hash.Close() - if _, err := hash.Write(data); err != nil { - return result, err - } - return hash.Sum() -} diff --git a/vendor/github.com/libp2p/go-openssl/net.go b/vendor/github.com/libp2p/go-openssl/net.go deleted file mode 100644 index b2293c7c..00000000 --- a/vendor/github.com/libp2p/go-openssl/net.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -import ( - "errors" - "net" - "time" -) - -type listener struct { - net.Listener - ctx *Ctx -} - -func (l *listener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return nil, err - } - ssl_c, err := Server(c, l.ctx) - if err != nil { - c.Close() - return nil, err - } - return ssl_c, nil -} - -// NewListener wraps an existing net.Listener such that all accepted -// connections are wrapped as OpenSSL server connections using the provided -// context ctx. -func NewListener(inner net.Listener, ctx *Ctx) net.Listener { - return &listener{ - Listener: inner, - ctx: ctx} -} - -// Listen is a wrapper around net.Listen that wraps incoming connections with -// an OpenSSL server connection using the provided context ctx. 
-func Listen(network, laddr string, ctx *Ctx) (net.Listener, error) { - if ctx == nil { - return nil, errors.New("no ssl context provided") - } - l, err := net.Listen(network, laddr) - if err != nil { - return nil, err - } - return NewListener(l, ctx), nil -} - -type DialFlags int - -const ( - InsecureSkipHostVerification DialFlags = 1 << iota - DisableSNI -) - -// Dial will connect to network/address and then wrap the corresponding -// underlying connection with an OpenSSL client connection using context ctx. -// If flags includes InsecureSkipHostVerification, the server certificate's -// hostname will not be checked to match the hostname in addr. Otherwise, flags -// should be 0. -// -// Dial probably won't work for you unless you set a verify location or add -// some certs to the certificate store of the client context you're using. -// This library is not nice enough to use the system certificate store by -// default for you yet. -func Dial(network, addr string, ctx *Ctx, flags DialFlags) (*Conn, error) { - return DialSession(network, addr, ctx, flags, nil) -} - -// DialTimeout acts like Dial but takes a timeout for network dial. -// -// The timeout includes only network dial. It does not include OpenSSL calls. -// -// See func Dial for a description of the network, addr, ctx and flags -// parameters. -func DialTimeout(network, addr string, timeout time.Duration, ctx *Ctx, - flags DialFlags) (*Conn, error) { - d := net.Dialer{Timeout: timeout} - return dialSession(d, network, addr, ctx, flags, nil) -} - -// DialSession will connect to network/address and then wrap the corresponding -// underlying connection with an OpenSSL client connection using context ctx. -// If flags includes InsecureSkipHostVerification, the server certificate's -// hostname will not be checked to match the hostname in addr. Otherwise, flags -// should be 0. 
-// -// Dial probably won't work for you unless you set a verify location or add -// some certs to the certificate store of the client context you're using. -// This library is not nice enough to use the system certificate store by -// default for you yet. -// -// If session is not nil it will be used to resume the tls state. The session -// can be retrieved from the GetSession method on the Conn. -func DialSession(network, addr string, ctx *Ctx, flags DialFlags, - session []byte) (*Conn, error) { - var d net.Dialer - return dialSession(d, network, addr, ctx, flags, session) -} - -func dialSession(d net.Dialer, network, addr string, ctx *Ctx, flags DialFlags, - session []byte) (*Conn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - if ctx == nil { - var err error - ctx, err = NewCtx() - if err != nil { - return nil, err - } - // TODO: use operating system default certificate chain? - } - - c, err := d.Dial(network, addr) - if err != nil { - return nil, err - } - conn, err := Client(c, ctx) - if err != nil { - c.Close() - return nil, err - } - if session != nil { - err := conn.setSession(session) - if err != nil { - c.Close() - return nil, err - } - } - if flags&DisableSNI == 0 { - err = conn.SetTlsExtHostName(host) - if err != nil { - conn.Close() - return nil, err - } - } - err = conn.Handshake() - if err != nil { - conn.Close() - return nil, err - } - if flags&InsecureSkipHostVerification == 0 { - err = conn.VerifyHostname(host) - if err != nil { - conn.Close() - return nil, err - } - } - return conn, nil -} diff --git a/vendor/github.com/libp2p/go-openssl/nid.go b/vendor/github.com/libp2p/go-openssl/nid.go deleted file mode 100644 index 936a52e7..00000000 --- a/vendor/github.com/libp2p/go-openssl/nid.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -type NID int - -const ( - NID_undef NID = 0 - NID_rsadsi NID = 1 - NID_pkcs NID = 2 - NID_md2 NID = 3 - NID_md5 NID = 4 - NID_rc4 NID = 5 - NID_rsaEncryption NID = 6 - NID_md2WithRSAEncryption NID = 7 - NID_md5WithRSAEncryption NID = 8 - NID_pbeWithMD2AndDES_CBC NID = 9 - NID_pbeWithMD5AndDES_CBC NID = 10 - NID_X500 NID = 11 - NID_X509 NID = 12 - NID_commonName NID = 13 - NID_countryName NID = 14 - NID_localityName NID = 15 - NID_stateOrProvinceName NID = 16 - NID_organizationName NID = 17 - NID_organizationalUnitName NID = 18 - NID_rsa NID = 19 - NID_pkcs7 NID = 20 - NID_pkcs7_data NID = 21 - NID_pkcs7_signed NID = 22 - NID_pkcs7_enveloped NID = 23 - NID_pkcs7_signedAndEnveloped NID = 24 - NID_pkcs7_digest NID = 25 - NID_pkcs7_encrypted NID = 26 - NID_pkcs3 NID = 27 - NID_dhKeyAgreement NID = 28 - NID_des_ecb NID = 29 - NID_des_cfb64 NID = 30 - NID_des_cbc NID = 31 - NID_des_ede NID = 32 - NID_des_ede3 NID = 33 - NID_idea_cbc NID = 34 - NID_idea_cfb64 NID = 35 - NID_idea_ecb NID = 36 - NID_rc2_cbc NID = 37 - NID_rc2_ecb NID = 38 - NID_rc2_cfb64 NID = 39 - NID_rc2_ofb64 NID = 40 - NID_sha NID = 41 - NID_shaWithRSAEncryption NID = 42 - NID_des_ede_cbc NID = 43 - NID_des_ede3_cbc NID = 44 - NID_des_ofb64 NID = 45 - NID_idea_ofb64 NID = 46 - NID_pkcs9 NID = 47 - NID_pkcs9_emailAddress NID = 48 - NID_pkcs9_unstructuredName NID = 49 - NID_pkcs9_contentType NID = 50 - NID_pkcs9_messageDigest NID = 51 - NID_pkcs9_signingTime NID = 52 - NID_pkcs9_countersignature NID = 53 - NID_pkcs9_challengePassword NID = 54 - 
NID_pkcs9_unstructuredAddress NID = 55 - NID_pkcs9_extCertAttributes NID = 56 - NID_netscape NID = 57 - NID_netscape_cert_extension NID = 58 - NID_netscape_data_type NID = 59 - NID_des_ede_cfb64 NID = 60 - NID_des_ede3_cfb64 NID = 61 - NID_des_ede_ofb64 NID = 62 - NID_des_ede3_ofb64 NID = 63 - NID_sha1 NID = 64 - NID_sha1WithRSAEncryption NID = 65 - NID_dsaWithSHA NID = 66 - NID_dsa_2 NID = 67 - NID_pbeWithSHA1AndRC2_CBC NID = 68 - NID_id_pbkdf2 NID = 69 - NID_dsaWithSHA1_2 NID = 70 - NID_netscape_cert_type NID = 71 - NID_netscape_base_url NID = 72 - NID_netscape_revocation_url NID = 73 - NID_netscape_ca_revocation_url NID = 74 - NID_netscape_renewal_url NID = 75 - NID_netscape_ca_policy_url NID = 76 - NID_netscape_ssl_server_name NID = 77 - NID_netscape_comment NID = 78 - NID_netscape_cert_sequence NID = 79 - NID_desx_cbc NID = 80 - NID_id_ce NID = 81 - NID_subject_key_identifier NID = 82 - NID_key_usage NID = 83 - NID_private_key_usage_period NID = 84 - NID_subject_alt_name NID = 85 - NID_issuer_alt_name NID = 86 - NID_basic_constraints NID = 87 - NID_crl_number NID = 88 - NID_certificate_policies NID = 89 - NID_authority_key_identifier NID = 90 - NID_bf_cbc NID = 91 - NID_bf_ecb NID = 92 - NID_bf_cfb64 NID = 93 - NID_bf_ofb64 NID = 94 - NID_mdc2 NID = 95 - NID_mdc2WithRSA NID = 96 - NID_rc4_40 NID = 97 - NID_rc2_40_cbc NID = 98 - NID_givenName NID = 99 - NID_surname NID = 100 - NID_initials NID = 101 - NID_uniqueIdentifier NID = 102 - NID_crl_distribution_points NID = 103 - NID_md5WithRSA NID = 104 - NID_serialNumber NID = 105 - NID_title NID = 106 - NID_description NID = 107 - NID_cast5_cbc NID = 108 - NID_cast5_ecb NID = 109 - NID_cast5_cfb64 NID = 110 - NID_cast5_ofb64 NID = 111 - NID_pbeWithMD5AndCast5_CBC NID = 112 - NID_dsaWithSHA1 NID = 113 - NID_md5_sha1 NID = 114 - NID_sha1WithRSA NID = 115 - NID_dsa NID = 116 - NID_ripemd160 NID = 117 - NID_ripemd160WithRSA NID = 119 - NID_rc5_cbc NID = 120 - NID_rc5_ecb NID = 121 - NID_rc5_cfb64 NID = 122 - 
NID_rc5_ofb64 NID = 123 - NID_rle_compression NID = 124 - NID_zlib_compression NID = 125 - NID_ext_key_usage NID = 126 - NID_id_pkix NID = 127 - NID_id_kp NID = 128 - NID_server_auth NID = 129 - NID_client_auth NID = 130 - NID_code_sign NID = 131 - NID_email_protect NID = 132 - NID_time_stamp NID = 133 - NID_ms_code_ind NID = 134 - NID_ms_code_com NID = 135 - NID_ms_ctl_sign NID = 136 - NID_ms_sgc NID = 137 - NID_ms_efs NID = 138 - NID_ns_sgc NID = 139 - NID_delta_crl NID = 140 - NID_crl_reason NID = 141 - NID_invalidity_date NID = 142 - NID_sxnet NID = 143 - NID_pbe_WithSHA1And128BitRC4 NID = 144 - NID_pbe_WithSHA1And40BitRC4 NID = 145 - NID_pbe_WithSHA1And3_Key_TripleDES_CBC NID = 146 - NID_pbe_WithSHA1And2_Key_TripleDES_CBC NID = 147 - NID_pbe_WithSHA1And128BitRC2_CBC NID = 148 - NID_pbe_WithSHA1And40BitRC2_CBC NID = 149 - NID_keyBag NID = 150 - NID_pkcs8ShroudedKeyBag NID = 151 - NID_certBag NID = 152 - NID_crlBag NID = 153 - NID_secretBag NID = 154 - NID_safeContentsBag NID = 155 - NID_friendlyName NID = 156 - NID_localKeyID NID = 157 - NID_x509Certificate NID = 158 - NID_sdsiCertificate NID = 159 - NID_x509Crl NID = 160 - NID_pbes2 NID = 161 - NID_pbmac1 NID = 162 - NID_hmacWithSHA1 NID = 163 - NID_id_qt_cps NID = 164 - NID_id_qt_unotice NID = 165 - NID_rc2_64_cbc NID = 166 - NID_SMIMECapabilities NID = 167 - NID_pbeWithMD2AndRC2_CBC NID = 168 - NID_pbeWithMD5AndRC2_CBC NID = 169 - NID_pbeWithSHA1AndDES_CBC NID = 170 - NID_ms_ext_req NID = 171 - NID_ext_req NID = 172 - NID_name NID = 173 - NID_dnQualifier NID = 174 - NID_id_pe NID = 175 - NID_id_ad NID = 176 - NID_info_access NID = 177 - NID_ad_OCSP NID = 178 - NID_ad_ca_issuers NID = 179 - NID_OCSP_sign NID = 180 - NID_X9_62_id_ecPublicKey NID = 408 - NID_hmac NID = 855 - NID_cmac NID = 894 - NID_dhpublicnumber NID = 920 - NID_tls1_prf NID = 1021 - NID_hkdf NID = 1036 - NID_X25519 NID = 1034 - NID_X448 NID = 1035 - NID_ED25519 NID = 1087 - NID_ED448 NID = 1088 -) diff --git 
a/vendor/github.com/libp2p/go-openssl/object.go b/vendor/github.com/libp2p/go-openssl/object.go deleted file mode 100644 index 4d908e6c..00000000 --- a/vendor/github.com/libp2p/go-openssl/object.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2020. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -// CreateObjectIdentifier creates ObjectIdentifier and returns NID for the created -// ObjectIdentifier -func CreateObjectIdentifier(oid string, shortName string, longName string) NID { - return NID(C.OBJ_create(C.CString(oid), C.CString(shortName), C.CString(longName))) -} diff --git a/vendor/github.com/libp2p/go-openssl/pem.go b/vendor/github.com/libp2p/go-openssl/pem.go deleted file mode 100644 index 6127cf07..00000000 --- a/vendor/github.com/libp2p/go-openssl/pem.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -import ( - "regexp" -) - -var pemSplit *regexp.Regexp = regexp.MustCompile(`(?sm)` + - `(^-----[\s-]*?BEGIN.*?-----[\s-]*?$` + - `.*?` + - `^-----[\s-]*?END.*?-----[\s-]*?$)`) - -func SplitPEM(data []byte) [][]byte { - return pemSplit.FindAll(data, -1) -} diff --git a/vendor/github.com/libp2p/go-openssl/sha1.go b/vendor/github.com/libp2p/go-openssl/sha1.go deleted file mode 100644 index ab4ad87f..00000000 --- a/vendor/github.com/libp2p/go-openssl/sha1.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "runtime" - "unsafe" -) - -type SHA1Hash struct { - ctx *C.EVP_MD_CTX - engine *Engine -} - -func NewSHA1Hash() (*SHA1Hash, error) { return NewSHA1HashWithEngine(nil) } - -func NewSHA1HashWithEngine(e *Engine) (*SHA1Hash, error) { - hash := &SHA1Hash{engine: e} - hash.ctx = C.X_EVP_MD_CTX_new() - if hash.ctx == nil { - return nil, errors.New("openssl: sha1: unable to allocate ctx") - } - runtime.SetFinalizer(hash, func(hash *SHA1Hash) { hash.Close() }) - if err := hash.Reset(); err != nil { - return nil, err - } - return hash, nil -} - -func (s *SHA1Hash) Close() { - if s.ctx != nil { - C.X_EVP_MD_CTX_free(s.ctx) - s.ctx = nil - } -} - -func engineRef(e *Engine) *C.ENGINE { - if e == nil { - return nil - } - return e.e -} - -func (s *SHA1Hash) Reset() error { - if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_sha1(), engineRef(s.engine)) != 1 { - return errors.New("openssl: sha1: cannot init digest ctx") - } - return nil -} - -func (s *SHA1Hash) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), - C.size_t(len(p))) != 1 { - return 0, errors.New("openssl: sha1: cannot update digest") - } - return len(p), nil -} - -func (s *SHA1Hash) Sum() (result [20]byte, err error) { - if C.X_EVP_DigestFinal_ex(s.ctx, - (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 { - return result, errors.New("openssl: sha1: cannot finalize ctx") - } - return result, s.Reset() -} - -func SHA1(data []byte) (result [20]byte, err error) { - hash, err := NewSHA1Hash() - if err != nil { - return result, err - } - defer hash.Close() - if _, err := hash.Write(data); err != nil { - return result, err - } - return hash.Sum() -} diff --git a/vendor/github.com/libp2p/go-openssl/sha256.go b/vendor/github.com/libp2p/go-openssl/sha256.go deleted file mode 100644 index d9189a94..00000000 --- a/vendor/github.com/libp2p/go-openssl/sha256.go +++ /dev/null @@ 
-1,89 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "errors" - "runtime" - "unsafe" -) - -type SHA256Hash struct { - ctx *C.EVP_MD_CTX - engine *Engine -} - -func NewSHA256Hash() (*SHA256Hash, error) { return NewSHA256HashWithEngine(nil) } - -func NewSHA256HashWithEngine(e *Engine) (*SHA256Hash, error) { - hash := &SHA256Hash{engine: e} - hash.ctx = C.X_EVP_MD_CTX_new() - if hash.ctx == nil { - return nil, errors.New("openssl: sha256: unable to allocate ctx") - } - runtime.SetFinalizer(hash, func(hash *SHA256Hash) { hash.Close() }) - if err := hash.Reset(); err != nil { - return nil, err - } - return hash, nil -} - -func (s *SHA256Hash) Close() { - if s.ctx != nil { - C.X_EVP_MD_CTX_free(s.ctx) - s.ctx = nil - } -} - -func (s *SHA256Hash) Reset() error { - if C.X_EVP_DigestInit_ex(s.ctx, C.X_EVP_sha256(), engineRef(s.engine)) != 1 { - return errors.New("openssl: sha256: cannot init digest ctx") - } - return nil -} - -func (s *SHA256Hash) Write(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, nil - } - if C.X_EVP_DigestUpdate(s.ctx, unsafe.Pointer(&p[0]), - C.size_t(len(p))) != 1 { - return 0, errors.New("openssl: sha256: cannot update digest") - } - return len(p), nil -} - -func (s *SHA256Hash) Sum() (result [32]byte, err error) { - if C.X_EVP_DigestFinal_ex(s.ctx, - (*C.uchar)(unsafe.Pointer(&result[0])), nil) != 1 { - return 
result, errors.New("openssl: sha256: cannot finalize ctx") - } - return result, s.Reset() -} - -func SHA256(data []byte) (result [32]byte, err error) { - hash, err := NewSHA256Hash() - if err != nil { - return result, err - } - defer hash.Close() - if _, err := hash.Write(data); err != nil { - return result, err - } - return hash.Sum() -} diff --git a/vendor/github.com/libp2p/go-openssl/shim.c b/vendor/github.com/libp2p/go-openssl/shim.c deleted file mode 100644 index b27a5743..00000000 --- a/vendor/github.com/libp2p/go-openssl/shim.c +++ /dev/null @@ -1,778 +0,0 @@ -/* - * Copyright (C) 2014 Space Monkey, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include "_cgo_export.h" - -/* - * Functions defined in other .c files - */ -extern int go_init_locks(); -extern void go_thread_locking_callback(int, int, const char*, int); -extern unsigned long go_thread_id_callback(); -static int go_write_bio_puts(BIO *b, const char *str) { - return go_write_bio_write(b, (char*)str, (int)strlen(str)); -} - -/* - ************************************************ - * v1.1.1 and later implementation - ************************************************ - */ -#if OPENSSL_VERSION_NUMBER >= 0x1010100fL - -const int X_ED25519_SUPPORT = 1; -int X_EVP_PKEY_ED25519 = EVP_PKEY_ED25519; - -int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, - const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ - return EVP_DigestSignInit(ctx, pctx, type, e, pkey); -} - -int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, - size_t *siglen, const unsigned char *tbs, size_t tbslen) { - return EVP_DigestSign(ctx, sigret, siglen, tbs, tbslen); -} - - -int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, - const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ - return EVP_DigestVerifyInit(ctx, pctx, type, e, pkey); -} - -int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret, - size_t siglen, const unsigned char *tbs, size_t tbslen){ - return EVP_DigestVerify(ctx, sigret, siglen, tbs, tbslen); -} - -#else - -const int X_ED25519_SUPPORT = 0; -int X_EVP_PKEY_ED25519 = EVP_PKEY_NONE; - -int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, - const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ - return 0; -} - -int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, - size_t *siglen, const unsigned char *tbs, size_t tbslen) { - return 0; -} - - -int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, - const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey){ - return 0; -} - -int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned 
char *sigret, - size_t siglen, const unsigned char *tbs, size_t tbslen){ - return 0; -} - -#endif - -/* - ************************************************ - * v1.1.X and later implementation - ************************************************ - */ -#if OPENSSL_VERSION_NUMBER >= 0x1010000fL - -void X_BIO_set_data(BIO* bio, void* data) { - BIO_set_data(bio, data); -} - -void* X_BIO_get_data(BIO* bio) { - return BIO_get_data(bio); -} - -EVP_MD_CTX* X_EVP_MD_CTX_new() { - return EVP_MD_CTX_new(); -} - -void X_EVP_MD_CTX_free(EVP_MD_CTX* ctx) { - EVP_MD_CTX_free(ctx); -} - -static int x_bio_create(BIO *b) { - BIO_set_shutdown(b, 1); - BIO_set_init(b, 1); - BIO_set_data(b, NULL); - BIO_clear_flags(b, ~0); - return 1; -} - -static int x_bio_free(BIO *b) { - return 1; -} - -static BIO_METHOD *writeBioMethod; -static BIO_METHOD *readBioMethod; - -BIO_METHOD* BIO_s_readBio() { return readBioMethod; } -BIO_METHOD* BIO_s_writeBio() { return writeBioMethod; } - -int x_bio_init_methods() { - writeBioMethod = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "Go Write BIO"); - if (!writeBioMethod) { - return 1; - } - if (1 != BIO_meth_set_write(writeBioMethod, - (int (*)(BIO *, const char *, int))go_write_bio_write)) { - return 2; - } - if (1 != BIO_meth_set_puts(writeBioMethod, go_write_bio_puts)) { - return 3; - } - if (1 != BIO_meth_set_ctrl(writeBioMethod, go_write_bio_ctrl)) { - return 4; - } - if (1 != BIO_meth_set_create(writeBioMethod, x_bio_create)) { - return 5; - } - if (1 != BIO_meth_set_destroy(writeBioMethod, x_bio_free)) { - return 6; - } - - readBioMethod = BIO_meth_new(BIO_TYPE_SOURCE_SINK, "Go Read BIO"); - if (!readBioMethod) { - return 7; - } - if (1 != BIO_meth_set_read(readBioMethod, go_read_bio_read)) { - return 8; - } - if (1 != BIO_meth_set_ctrl(readBioMethod, go_read_bio_ctrl)) { - return 9; - } - if (1 != BIO_meth_set_create(readBioMethod, x_bio_create)) { - return 10; - } - if (1 != BIO_meth_set_destroy(readBioMethod, x_bio_free)) { - return 11; - } - - return 0; -} - 
-const EVP_MD *X_EVP_dss() { - return NULL; -} - -const EVP_MD *X_EVP_dss1() { - return NULL; -} - -const EVP_MD *X_EVP_sha() { - return NULL; -} - -int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) { - return EVP_CIPHER_CTX_encrypting(ctx); -} - -int X_X509_add_ref(X509* x509) { - return X509_up_ref(x509); -} - -const ASN1_TIME *X_X509_get0_notBefore(const X509 *x) { - return X509_get0_notBefore(x); -} - -const ASN1_TIME *X_X509_get0_notAfter(const X509 *x) { - return X509_get0_notAfter(x); -} - -HMAC_CTX *X_HMAC_CTX_new(void) { - return HMAC_CTX_new(); -} - -void X_HMAC_CTX_free(HMAC_CTX *ctx) { - HMAC_CTX_free(ctx); -} - -int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) { - return PEM_write_bio_PrivateKey_traditional(bio, key, enc, kstr, klen, cb, u); -} - -#endif - -/* - ************************************************ - * v1.0.X implementation - ************************************************ - */ -#if OPENSSL_VERSION_NUMBER < 0x1010000fL - -static int x_bio_create(BIO *b) { - b->shutdown = 1; - b->init = 1; - b->num = -1; - b->ptr = NULL; - b->flags = 0; - return 1; -} - -static int x_bio_free(BIO *b) { - return 1; -} - -static BIO_METHOD writeBioMethod = { - BIO_TYPE_SOURCE_SINK, - "Go Write BIO", - (int (*)(BIO *, const char *, int))go_write_bio_write, - NULL, - go_write_bio_puts, - NULL, - go_write_bio_ctrl, - x_bio_create, - x_bio_free, - NULL}; - -static BIO_METHOD* BIO_s_writeBio() { return &writeBioMethod; } - -static BIO_METHOD readBioMethod = { - BIO_TYPE_SOURCE_SINK, - "Go Read BIO", - NULL, - go_read_bio_read, - NULL, - NULL, - go_read_bio_ctrl, - x_bio_create, - x_bio_free, - NULL}; - -static BIO_METHOD* BIO_s_readBio() { return &readBioMethod; } - -int x_bio_init_methods() { - /* statically initialized above */ - return 0; -} - -void X_BIO_set_data(BIO* bio, void* data) { - bio->ptr = data; -} - -void* X_BIO_get_data(BIO* bio) { - 
return bio->ptr; -} - -EVP_MD_CTX* X_EVP_MD_CTX_new() { - return EVP_MD_CTX_create(); -} - -void X_EVP_MD_CTX_free(EVP_MD_CTX* ctx) { - EVP_MD_CTX_destroy(ctx); -} - -int X_X509_add_ref(X509* x509) { - CRYPTO_add(&x509->references, 1, CRYPTO_LOCK_X509); - return 1; -} - -const ASN1_TIME *X_X509_get0_notBefore(const X509 *x) { - return x->cert_info->validity->notBefore; -} - -const ASN1_TIME *X_X509_get0_notAfter(const X509 *x) { - return x->cert_info->validity->notAfter; -} - -const EVP_MD *X_EVP_dss() { - return EVP_dss(); -} - -const EVP_MD *X_EVP_dss1() { - return EVP_dss1(); -} - -const EVP_MD *X_EVP_sha() { - return EVP_sha(); -} - -int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx) { - return ctx->encrypt; -} - -HMAC_CTX *X_HMAC_CTX_new(void) { - /* v1.1.0 uses a OPENSSL_zalloc to allocate the memory which does not exist - * in previous versions. malloc+memset to get the same behavior */ - HMAC_CTX *ctx = (HMAC_CTX *)OPENSSL_malloc(sizeof(HMAC_CTX)); - if (ctx) { - memset(ctx, 0, sizeof(HMAC_CTX)); - HMAC_CTX_init(ctx); - } - return ctx; -} - -void X_HMAC_CTX_free(HMAC_CTX *ctx) { - if (ctx) { - HMAC_CTX_cleanup(ctx); - OPENSSL_free(ctx); - } -} - -int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u) { - /* PEM_write_bio_PrivateKey always tries to use the PKCS8 format if it - * is available, instead of using the "traditional" format as stated in the - * OpenSSL man page. - * i2d_PrivateKey should give us the correct DER encoding, so we'll just - * use PEM_ASN1_write_bio directly to write the DER encoding with the correct - * type header. 
*/ - - int ppkey_id, pkey_base_id, ppkey_flags; - const char *pinfo, *ppem_str; - char pem_type_str[80]; - - // Lookup the ASN1 method information to get the pem type - if (EVP_PKEY_asn1_get0_info(&ppkey_id, &pkey_base_id, &ppkey_flags, &pinfo, &ppem_str, key->ameth) != 1) { - return 0; - } - // Set up the PEM type string - if (BIO_snprintf(pem_type_str, 80, "%s PRIVATE KEY", ppem_str) <= 0) { - // Failed to write out the pem type string, something is really wrong. - return 0; - } - // Write out everything to the BIO - return PEM_ASN1_write_bio((i2d_of_void *)i2d_PrivateKey, - pem_type_str, bio, key, enc, kstr, klen, cb, u); -} - -#endif - -/* - ************************************************ - * common implementation - ************************************************ - */ - -int X_shim_init() { - int rc = 0; - - OPENSSL_config(NULL); - ENGINE_load_builtin_engines(); - SSL_load_error_strings(); - SSL_library_init(); - OpenSSL_add_all_algorithms(); - // - // Set up OPENSSL thread safety callbacks. 
- rc = go_init_locks(); - if (rc != 0) { - return rc; - } - CRYPTO_set_locking_callback(go_thread_locking_callback); - CRYPTO_set_id_callback(go_thread_id_callback); - - rc = x_bio_init_methods(); - if (rc != 0) { - return rc; - } - - return 0; -} - -void * X_OPENSSL_malloc(size_t size) { - return OPENSSL_malloc(size); -} - -void X_OPENSSL_free(void *ref) { - OPENSSL_free(ref); -} - -long X_SSL_set_options(SSL* ssl, long options) { - return SSL_set_options(ssl, options); -} - -long X_SSL_get_options(SSL* ssl) { - return SSL_get_options(ssl); -} - -long X_SSL_clear_options(SSL* ssl, long options) { - return SSL_clear_options(ssl, options); -} - -long X_SSL_set_tlsext_host_name(SSL *ssl, const char *name) { - return SSL_set_tlsext_host_name(ssl, name); -} -const char * X_SSL_get_cipher_name(const SSL *ssl) { - return SSL_get_cipher_name(ssl); -} -int X_SSL_session_reused(SSL *ssl) { - return SSL_session_reused(ssl); -} - -int X_SSL_new_index() { - return SSL_get_ex_new_index(0, NULL, NULL, NULL, go_ssl_crypto_ex_free); -} - -int X_SSL_verify_cb(int ok, X509_STORE_CTX* store) { - SSL* ssl = (SSL *)X509_STORE_CTX_get_ex_data(store, - SSL_get_ex_data_X509_STORE_CTX_idx()); - void* p = SSL_get_ex_data(ssl, get_ssl_idx()); - // get the pointer to the go Ctx object and pass it back into the thunk - return go_ssl_verify_cb_thunk(p, ok, store); -} - -const SSL_METHOD *X_SSLv23_method() { - return SSLv23_method(); -} - -const SSL_METHOD *X_SSLv3_method() { -#ifndef OPENSSL_NO_SSL3_METHOD - return SSLv3_method(); -#else - return NULL; -#endif -} - -const SSL_METHOD *X_TLSv1_method() { - return TLSv1_method(); -} - -const SSL_METHOD *X_TLSv1_1_method() { -#if defined(TLS1_1_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX) - return TLSv1_1_method(); -#else - return NULL; -#endif -} - -const SSL_METHOD *X_TLSv1_2_method() { -#if defined(TLS1_2_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX) - return TLSv1_2_method(); -#else - return NULL; -#endif -} - -int X_SSL_CTX_new_index() { - 
return SSL_CTX_get_ex_new_index(0, NULL, NULL, NULL, NULL); -} - -int X_SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version) { - return SSL_CTX_set_min_proto_version(ctx, version); -} - -int X_SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version) { - return SSL_CTX_set_max_proto_version(ctx, version); -} - -long X_SSL_CTX_set_options(SSL_CTX* ctx, long options) { - return SSL_CTX_set_options(ctx, options); -} - -long X_SSL_CTX_clear_options(SSL_CTX* ctx, long options) { - return SSL_CTX_clear_options(ctx, options); -} - -long X_SSL_CTX_get_options(SSL_CTX* ctx) { - return SSL_CTX_get_options(ctx); -} - -long X_SSL_CTX_set_mode(SSL_CTX* ctx, long modes) { - return SSL_CTX_set_mode(ctx, modes); -} - -long X_SSL_CTX_get_mode(SSL_CTX* ctx) { - return SSL_CTX_get_mode(ctx); -} - -long X_SSL_CTX_set_session_cache_mode(SSL_CTX* ctx, long modes) { - return SSL_CTX_set_session_cache_mode(ctx, modes); -} - -long X_SSL_CTX_sess_set_cache_size(SSL_CTX* ctx, long t) { - return SSL_CTX_sess_set_cache_size(ctx, t); -} - -long X_SSL_CTX_sess_get_cache_size(SSL_CTX* ctx) { - return SSL_CTX_sess_get_cache_size(ctx); -} - -long X_SSL_CTX_set_timeout(SSL_CTX* ctx, long t) { - return SSL_CTX_set_timeout(ctx, t); -} - -long X_SSL_CTX_get_timeout(SSL_CTX* ctx) { - return SSL_CTX_get_timeout(ctx); -} - -long X_SSL_CTX_add_extra_chain_cert(SSL_CTX* ctx, X509 *cert) { - return SSL_CTX_add_extra_chain_cert(ctx, cert); -} - -long X_SSL_CTX_set_tmp_ecdh(SSL_CTX* ctx, EC_KEY *key) { - return SSL_CTX_set_tmp_ecdh(ctx, key); -} - -long X_SSL_CTX_set_tlsext_servername_callback( - SSL_CTX* ctx, int (*cb)(SSL *con, int *ad, void *args)) { - return SSL_CTX_set_tlsext_servername_callback(ctx, cb); -} - -int X_SSL_CTX_verify_cb(int ok, X509_STORE_CTX* store) { - SSL* ssl = (SSL *)X509_STORE_CTX_get_ex_data(store, - SSL_get_ex_data_X509_STORE_CTX_idx()); - SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(ssl); - void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx()); - // get the pointer to the go Ctx 
object and pass it back into the thunk - return go_ssl_ctx_verify_cb_thunk(p, ok, store); -} - -long X_SSL_CTX_set_tmp_dh(SSL_CTX* ctx, DH *dh) { - return SSL_CTX_set_tmp_dh(ctx, dh); -} - -long X_PEM_read_DHparams(SSL_CTX* ctx, DH *dh) { - return SSL_CTX_set_tmp_dh(ctx, dh); -} - -int X_SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX *sslctx, - int (*cb)(SSL *s, unsigned char key_name[16], - unsigned char iv[EVP_MAX_IV_LENGTH], - EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc)) { - return SSL_CTX_set_tlsext_ticket_key_cb(sslctx, cb); -} - -int X_SSL_CTX_ticket_key_cb(SSL *s, unsigned char key_name[16], - unsigned char iv[EVP_MAX_IV_LENGTH], - EVP_CIPHER_CTX *cctx, HMAC_CTX *hctx, int enc) { - - SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(s); - void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx()); - // get the pointer to the go Ctx object and pass it back into the thunk - return go_ticket_key_cb_thunk(p, s, key_name, iv, cctx, hctx, enc); -} - -int X_BIO_get_flags(BIO *b) { - return BIO_get_flags(b); -} - -void X_BIO_set_flags(BIO *b, int flags) { - return BIO_set_flags(b, flags); -} - -void X_BIO_clear_flags(BIO *b, int flags) { - BIO_clear_flags(b, flags); -} - -int X_BIO_read(BIO *b, void *buf, int len) { - return BIO_read(b, buf, len); -} - -int X_BIO_write(BIO *b, const void *buf, int len) { - return BIO_write(b, buf, len); -} - -BIO *X_BIO_new_write_bio() { - return BIO_new(BIO_s_writeBio()); -} - -BIO *X_BIO_new_read_bio() { - return BIO_new(BIO_s_readBio()); -} - -const EVP_MD *X_EVP_get_digestbyname(const char *name) { - return EVP_get_digestbyname(name); -} - -const EVP_MD *X_EVP_md_null() { - return EVP_md_null(); -} - -const EVP_MD *X_EVP_md5() { - return EVP_md5(); -} - -const EVP_MD *X_EVP_md4() { - return EVP_md4(); -} - -const EVP_MD *X_EVP_ripemd160() { - return EVP_ripemd160(); -} - -const EVP_MD *X_EVP_sha224() { - return EVP_sha224(); -} - -const EVP_MD *X_EVP_sha1() { - return EVP_sha1(); -} - -const EVP_MD *X_EVP_sha256() { - return EVP_sha256(); -} - 
-const EVP_MD *X_EVP_sha384() { - return EVP_sha384(); -} - -const EVP_MD *X_EVP_sha512() { - return EVP_sha512(); -} - -int X_EVP_MD_size(const EVP_MD *md) { - return EVP_MD_size(md); -} - -int X_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl) { - return EVP_DigestInit_ex(ctx, type, impl); -} - -int X_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt) { - return EVP_DigestUpdate(ctx, d, cnt); -} - -int X_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s) { - return EVP_DigestFinal_ex(ctx, md, s); -} - -int X_EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type) { - return EVP_SignInit(ctx, type); -} - -int X_EVP_SignUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt) { - return EVP_SignUpdate(ctx, d, cnt); -} - -EVP_PKEY *X_EVP_PKEY_new(void) { - return EVP_PKEY_new(); -} - -void X_EVP_PKEY_free(EVP_PKEY *pkey) { - EVP_PKEY_free(pkey); -} - -int X_EVP_PKEY_size(EVP_PKEY *pkey) { - return EVP_PKEY_size(pkey); -} - -struct rsa_st *X_EVP_PKEY_get1_RSA(EVP_PKEY *pkey) { - return EVP_PKEY_get1_RSA(pkey); -} - -int X_EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key) { - return EVP_PKEY_set1_RSA(pkey, key); -} - -int X_EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key) { - return EVP_PKEY_assign(pkey, type, key); -} - -int X_EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, EVP_PKEY *pkey) { - return EVP_SignFinal(ctx, md, s, pkey); -} - -int X_EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type) { - return EVP_VerifyInit(ctx, type); -} - -int X_EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *d, - unsigned int cnt) { - return EVP_VerifyUpdate(ctx, d, cnt); -} - -int X_EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, unsigned int siglen, EVP_PKEY *pkey) { - return EVP_VerifyFinal(ctx, sigbuf, siglen, pkey); -} - -int X_EVP_CIPHER_block_size(EVP_CIPHER *c) { - return EVP_CIPHER_block_size(c); -} - -int X_EVP_CIPHER_key_length(EVP_CIPHER *c) { - return EVP_CIPHER_key_length(c); -} - 
-int X_EVP_CIPHER_iv_length(EVP_CIPHER *c) { - return EVP_CIPHER_iv_length(c); -} - -int X_EVP_CIPHER_nid(EVP_CIPHER *c) { - return EVP_CIPHER_nid(c); -} - -int X_EVP_CIPHER_CTX_block_size(EVP_CIPHER_CTX *ctx) { - return EVP_CIPHER_CTX_block_size(ctx); -} - -int X_EVP_CIPHER_CTX_key_length(EVP_CIPHER_CTX *ctx) { - return EVP_CIPHER_CTX_key_length(ctx); -} - -int X_EVP_CIPHER_CTX_iv_length(EVP_CIPHER_CTX *ctx) { - return EVP_CIPHER_CTX_iv_length(ctx); -} - -void X_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int padding) { - //openssl always returns 1 for set_padding - //hence return value is not checked - EVP_CIPHER_CTX_set_padding(ctx, padding); -} - -const EVP_CIPHER *X_EVP_CIPHER_CTX_cipher(EVP_CIPHER_CTX *ctx) { - return EVP_CIPHER_CTX_cipher(ctx); -} - -int X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid) { - return EVP_PKEY_CTX_set_ec_paramgen_curve_nid(ctx, nid); -} - -size_t X_HMAC_size(const HMAC_CTX *e) { - return HMAC_size(e); -} - -int X_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl) { - return HMAC_Init_ex(ctx, key, len, md, impl); -} - -int X_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len) { - return HMAC_Update(ctx, data, len); -} - -int X_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len) { - return HMAC_Final(ctx, md, len); -} - -int X_sk_X509_num(STACK_OF(X509) *sk) { - return sk_X509_num(sk); -} - -X509 *X_sk_X509_value(STACK_OF(X509)* sk, int i) { - return sk_X509_value(sk, i); -} - -long X_X509_get_version(const X509 *x) { - return X509_get_version(x); -} - -int X_X509_set_version(X509 *x, long version) { - return X509_set_version(x, version); -} diff --git a/vendor/github.com/libp2p/go-openssl/shim.h b/vendor/github.com/libp2p/go-openssl/shim.h deleted file mode 100644 index 94fe8c61..00000000 --- a/vendor/github.com/libp2p/go-openssl/shim.h +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (C) 2014 Space Monkey, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -#ifndef SSL_MODE_RELEASE_BUFFERS -#define SSL_MODE_RELEASE_BUFFERS 0 -#endif - -#ifndef SSL_OP_NO_COMPRESSION -#define SSL_OP_NO_COMPRESSION 0 -#endif - -/* shim methods */ -extern int X_shim_init(); - -/* Library methods */ -extern void X_OPENSSL_free(void *ref); -extern void *X_OPENSSL_malloc(size_t size); - -/* SSL methods */ -extern long X_SSL_set_options(SSL* ssl, long options); -extern long X_SSL_get_options(SSL* ssl); -extern long X_SSL_clear_options(SSL* ssl, long options); -extern long X_SSL_set_tlsext_host_name(SSL *ssl, const char *name); -extern const char * X_SSL_get_cipher_name(const SSL *ssl); -extern int X_SSL_session_reused(SSL *ssl); -extern int X_SSL_new_index(); - -extern const SSL_METHOD *X_SSLv23_method(); -extern const SSL_METHOD *X_SSLv3_method(); -extern const SSL_METHOD *X_TLSv1_method(); -extern const SSL_METHOD *X_TLSv1_1_method(); -extern const SSL_METHOD *X_TLSv1_2_method(); - -#if defined SSL_CTRL_SET_TLSEXT_HOSTNAME -extern int sni_cb(SSL *ssl_conn, int *ad, void *arg); -#endif -extern int X_SSL_verify_cb(int ok, X509_STORE_CTX* store); - -/* SSL_CTX methods */ -extern int X_SSL_CTX_new_index(); -extern int X_SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version); -extern int X_SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version); 
-extern long X_SSL_CTX_set_options(SSL_CTX* ctx, long options); -extern long X_SSL_CTX_clear_options(SSL_CTX* ctx, long options); -extern long X_SSL_CTX_get_options(SSL_CTX* ctx); -extern long X_SSL_CTX_set_mode(SSL_CTX* ctx, long modes); -extern long X_SSL_CTX_get_mode(SSL_CTX* ctx); -extern long X_SSL_CTX_set_session_cache_mode(SSL_CTX* ctx, long modes); -extern long X_SSL_CTX_sess_set_cache_size(SSL_CTX* ctx, long t); -extern long X_SSL_CTX_sess_get_cache_size(SSL_CTX* ctx); -extern long X_SSL_CTX_set_timeout(SSL_CTX* ctx, long t); -extern long X_SSL_CTX_get_timeout(SSL_CTX* ctx); -extern long X_SSL_CTX_add_extra_chain_cert(SSL_CTX* ctx, X509 *cert); -extern long X_SSL_CTX_set_tmp_ecdh(SSL_CTX* ctx, EC_KEY *key); -extern long X_SSL_CTX_set_tlsext_servername_callback(SSL_CTX* ctx, int (*cb)(SSL *con, int *ad, void *args)); -extern int X_SSL_CTX_verify_cb(int ok, X509_STORE_CTX* store); -extern long X_SSL_CTX_set_tmp_dh(SSL_CTX* ctx, DH *dh); -extern long X_PEM_read_DHparams(SSL_CTX* ctx, DH *dh); -extern int X_SSL_CTX_set_tlsext_ticket_key_cb(SSL_CTX *sslctx, - int (*cb)(SSL *s, unsigned char key_name[16], - unsigned char iv[EVP_MAX_IV_LENGTH], - EVP_CIPHER_CTX *ctx, HMAC_CTX *hctx, int enc)); -extern int X_SSL_CTX_ticket_key_cb(SSL *s, unsigned char key_name[16], - unsigned char iv[EVP_MAX_IV_LENGTH], - EVP_CIPHER_CTX *cctx, HMAC_CTX *hctx, int enc); -extern int SSL_CTX_set_alpn_protos(SSL_CTX *ctx, const unsigned char *protos, - unsigned int protos_len); - -/* BIO methods */ -extern int X_BIO_get_flags(BIO *b); -extern void X_BIO_set_flags(BIO *bio, int flags); -extern void X_BIO_clear_flags(BIO *bio, int flags); -extern void X_BIO_set_data(BIO *bio, void* data); -extern void *X_BIO_get_data(BIO *bio); -extern int X_BIO_read(BIO *b, void *buf, int len); -extern int X_BIO_write(BIO *b, const void *buf, int len); -extern BIO *X_BIO_new_write_bio(); -extern BIO *X_BIO_new_read_bio(); - -/* EVP methods */ -extern const int X_ED25519_SUPPORT; -extern int 
X_EVP_PKEY_ED25519; -extern const EVP_MD *X_EVP_get_digestbyname(const char *name); -extern EVP_MD_CTX *X_EVP_MD_CTX_new(); -extern void X_EVP_MD_CTX_free(EVP_MD_CTX *ctx); -extern const EVP_MD *X_EVP_md_null(); -extern const EVP_MD *X_EVP_md5(); -extern const EVP_MD *X_EVP_md4(); -extern const EVP_MD *X_EVP_sha(); -extern const EVP_MD *X_EVP_sha1(); -extern const EVP_MD *X_EVP_dss(); -extern const EVP_MD *X_EVP_dss1(); -extern const EVP_MD *X_EVP_ripemd160(); -extern const EVP_MD *X_EVP_sha224(); -extern const EVP_MD *X_EVP_sha256(); -extern const EVP_MD *X_EVP_sha384(); -extern const EVP_MD *X_EVP_sha512(); -extern int X_EVP_MD_size(const EVP_MD *md); -extern int X_EVP_DigestInit_ex(EVP_MD_CTX *ctx, const EVP_MD *type, ENGINE *impl); -extern int X_EVP_DigestUpdate(EVP_MD_CTX *ctx, const void *d, size_t cnt); -extern int X_EVP_DigestFinal_ex(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s); -extern int X_EVP_SignInit(EVP_MD_CTX *ctx, const EVP_MD *type); -extern int X_EVP_SignUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt); -extern int X_EVP_DigestSignInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); -extern int X_EVP_DigestSign(EVP_MD_CTX *ctx, unsigned char *sigret, size_t *siglen, const unsigned char *tbs, size_t tbslen); -extern EVP_PKEY *X_EVP_PKEY_new(void); -extern void X_EVP_PKEY_free(EVP_PKEY *pkey); -extern int X_EVP_PKEY_size(EVP_PKEY *pkey); -extern struct rsa_st *X_EVP_PKEY_get1_RSA(EVP_PKEY *pkey); -extern int X_EVP_PKEY_set1_RSA(EVP_PKEY *pkey, struct rsa_st *key); -extern int X_EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key); -extern int X_EVP_SignFinal(EVP_MD_CTX *ctx, unsigned char *md, unsigned int *s, EVP_PKEY *pkey); -extern int X_EVP_VerifyInit(EVP_MD_CTX *ctx, const EVP_MD *type); -extern int X_EVP_VerifyUpdate(EVP_MD_CTX *ctx, const void *d, unsigned int cnt); -extern int X_EVP_VerifyFinal(EVP_MD_CTX *ctx, const unsigned char *sigbuf, unsigned int siglen, EVP_PKEY *pkey); 
-extern int X_EVP_DigestVerifyInit(EVP_MD_CTX *ctx, EVP_PKEY_CTX **pctx, const EVP_MD *type, ENGINE *e, EVP_PKEY *pkey); -extern int X_EVP_DigestVerify(EVP_MD_CTX *ctx, const unsigned char *sigret, size_t siglen, const unsigned char *tbs, size_t tbslen); -extern int X_EVP_CIPHER_block_size(EVP_CIPHER *c); -extern int X_EVP_CIPHER_key_length(EVP_CIPHER *c); -extern int X_EVP_CIPHER_iv_length(EVP_CIPHER *c); -extern int X_EVP_CIPHER_nid(EVP_CIPHER *c); -extern int X_EVP_CIPHER_CTX_block_size(EVP_CIPHER_CTX *ctx); -extern int X_EVP_CIPHER_CTX_key_length(EVP_CIPHER_CTX *ctx); -extern int X_EVP_CIPHER_CTX_iv_length(EVP_CIPHER_CTX *ctx); -extern void X_EVP_CIPHER_CTX_set_padding(EVP_CIPHER_CTX *ctx, int padding); -extern const EVP_CIPHER *X_EVP_CIPHER_CTX_cipher(EVP_CIPHER_CTX *ctx); -extern int X_EVP_CIPHER_CTX_encrypting(const EVP_CIPHER_CTX *ctx); -extern int X_EVP_PKEY_CTX_set_ec_paramgen_curve_nid(EVP_PKEY_CTX *ctx, int nid); - -/* HMAC methods */ -extern size_t X_HMAC_size(const HMAC_CTX *e); -extern HMAC_CTX *X_HMAC_CTX_new(void); -extern void X_HMAC_CTX_free(HMAC_CTX *ctx); -extern int X_HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, const EVP_MD *md, ENGINE *impl); -extern int X_HMAC_Update(HMAC_CTX *ctx, const unsigned char *data, size_t len); -extern int X_HMAC_Final(HMAC_CTX *ctx, unsigned char *md, unsigned int *len); - -/* X509 methods */ -extern int X_X509_add_ref(X509* x509); -extern const ASN1_TIME *X_X509_get0_notBefore(const X509 *x); -extern const ASN1_TIME *X_X509_get0_notAfter(const X509 *x); -extern int X_sk_X509_num(STACK_OF(X509) *sk); -extern X509 *X_sk_X509_value(STACK_OF(X509)* sk, int i); -extern long X_X509_get_version(const X509 *x); -extern int X_X509_set_version(X509 *x, long version); - -/* PEM methods */ -extern int X_PEM_write_bio_PrivateKey_traditional(BIO *bio, EVP_PKEY *key, const EVP_CIPHER *enc, unsigned char *kstr, int klen, pem_password_cb *cb, void *u); - -/* Object methods */ -extern int OBJ_create(const char *oid,const 
char *sn,const char *ln); - -/* Extension helper method */ -extern const unsigned char * get_extention(X509 *x, int NID, int *data_len); -extern int add_custom_ext(X509 *cert, int nid, char *value, int len); \ No newline at end of file diff --git a/vendor/github.com/libp2p/go-openssl/sni.c b/vendor/github.com/libp2p/go-openssl/sni.c deleted file mode 100644 index f9e8d16b..00000000 --- a/vendor/github.com/libp2p/go-openssl/sni.c +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include -#include "_cgo_export.h" -#include - -int sni_cb(SSL *con, int *ad, void *arg) { - SSL_CTX* ssl_ctx = ssl_ctx = SSL_get_SSL_CTX(con); - void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx()); - return sni_cb_thunk(p, con, ad, arg); -} diff --git a/vendor/github.com/libp2p/go-openssl/ssl.go b/vendor/github.com/libp2p/go-openssl/ssl.go deleted file mode 100644 index b187d15d..00000000 --- a/vendor/github.com/libp2p/go-openssl/ssl.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "os" - "unsafe" - - "github.com/mattn/go-pointer" -) - -type SSLTLSExtErr int - -const ( - SSLTLSExtErrOK SSLTLSExtErr = C.SSL_TLSEXT_ERR_OK - SSLTLSExtErrAlertWarning SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_WARNING - SSLTLSEXTErrAlertFatal SSLTLSExtErr = C.SSL_TLSEXT_ERR_ALERT_FATAL - SSLTLSEXTErrNoAck SSLTLSExtErr = C.SSL_TLSEXT_ERR_NOACK -) - -var ( - ssl_idx = C.X_SSL_new_index() -) - -//export get_ssl_idx -func get_ssl_idx() C.int { - return ssl_idx -} - -type SSL struct { - ssl *C.SSL - verify_cb VerifyCallback -} - -//export go_ssl_verify_cb_thunk -func go_ssl_verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int { - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: verify callback panic'd: %v", err) - os.Exit(1) - } - }() - verify_cb := pointer.Restore(p).(*SSL).verify_cb - // set up defaults just in case verify_cb is nil - if verify_cb != nil { - store := &CertificateStoreCtx{ctx: ctx} - if verify_cb(ok == 1, store) { - ok = 1 - } else { - ok = 0 - } - } - return ok -} - -// Wrapper around SSL_get_servername. Returns server name according to rfc6066 -// http://tools.ietf.org/html/rfc6066. -func (s *SSL) GetServername() string { - return C.GoString(C.SSL_get_servername(s.ssl, C.TLSEXT_NAMETYPE_host_name)) -} - -// GetOptions returns SSL options. See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html -func (s *SSL) GetOptions() Options { - return Options(C.X_SSL_get_options(s.ssl)) -} - -// SetOptions sets SSL options. 
See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html -func (s *SSL) SetOptions(options Options) Options { - return Options(C.X_SSL_set_options(s.ssl, C.long(options))) -} - -// ClearOptions clear SSL options. See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html -func (s *SSL) ClearOptions(options Options) Options { - return Options(C.X_SSL_clear_options(s.ssl, C.long(options))) -} - -// SetVerify controls peer verification settings. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) SetVerify(options VerifyOptions, verify_cb VerifyCallback) { - s.verify_cb = verify_cb - if verify_cb != nil { - C.SSL_set_verify(s.ssl, C.int(options), (*[0]byte)(C.X_SSL_verify_cb)) - } else { - C.SSL_set_verify(s.ssl, C.int(options), nil) - } -} - -// SetVerifyMode controls peer verification setting. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) SetVerifyMode(options VerifyOptions) { - s.SetVerify(options, s.verify_cb) -} - -// SetVerifyCallback controls peer verification setting. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) SetVerifyCallback(verify_cb VerifyCallback) { - s.SetVerify(s.VerifyMode(), verify_cb) -} - -// GetVerifyCallback returns callback function. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) GetVerifyCallback() VerifyCallback { - return s.verify_cb -} - -// VerifyMode returns peer verification setting. See -// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) VerifyMode() VerifyOptions { - return VerifyOptions(C.SSL_get_verify_mode(s.ssl)) -} - -// SetVerifyDepth controls how many certificates deep the certificate -// verification logic is willing to follow a certificate chain. 
See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) SetVerifyDepth(depth int) { - C.SSL_set_verify_depth(s.ssl, C.int(depth)) -} - -// GetVerifyDepth controls how many certificates deep the certificate -// verification logic is willing to follow a certificate chain. See -// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html -func (s *SSL) GetVerifyDepth() int { - return int(C.SSL_get_verify_depth(s.ssl)) -} - -// SetSSLCtx changes context to new one. Useful for Server Name Indication (SNI) -// rfc6066 http://tools.ietf.org/html/rfc6066. See -// http://stackoverflow.com/questions/22373332/serving-multiple-domains-in-one-box-with-sni -func (s *SSL) SetSSLCtx(ctx *Ctx) { - /* - * SSL_set_SSL_CTX() only changes certs as of 1.0.0d - * adjust other things we care about - */ - C.SSL_set_SSL_CTX(s.ssl, ctx.ctx) -} - -//export sni_cb_thunk -func sni_cb_thunk(p unsafe.Pointer, con *C.SSL, ad unsafe.Pointer, arg unsafe.Pointer) C.int { - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: verify callback sni panic'd: %v", err) - os.Exit(1) - } - }() - - sni_cb := pointer.Restore(p).(*Ctx).sni_cb - - s := &SSL{ssl: con} - // This attaches a pointer to our SSL struct into the SNI callback. - C.SSL_set_ex_data(s.ssl, get_ssl_idx(), pointer.Save(s)) - - // Note: this is ctx.sni_cb, not C.sni_cb - return C.int(sni_cb(s)) -} diff --git a/vendor/github.com/libp2p/go-openssl/tickets.go b/vendor/github.com/libp2p/go-openssl/tickets.go deleted file mode 100644 index 2ee8ed9b..00000000 --- a/vendor/github.com/libp2p/go-openssl/tickets.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright (C) 2017. See AUTHORS. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openssl - -// #include "shim.h" -import "C" - -import ( - "os" - "unsafe" - - "github.com/mattn/go-pointer" -) - -const ( - KeyNameSize = 16 -) - -// TicketCipherCtx describes the cipher that will be used by the ticket store -// for encrypting the tickets. Engine may be nil if no engine is desired. -type TicketCipherCtx struct { - Cipher *Cipher - Engine *Engine -} - -// TicketDigestCtx describes the digest that will be used by the ticket store -// to authenticate the data. Engine may be nil if no engine is desired. -type TicketDigestCtx struct { - Digest *Digest - Engine *Engine -} - -// TicketName is an identifier for the key material for a ticket. -type TicketName [KeyNameSize]byte - -// TicketKey is the key material for a ticket. If this is lost, forward secrecy -// is lost as it allows decrypting TLS sessions retroactively. -type TicketKey struct { - Name TicketName - CipherKey []byte - HMACKey []byte - IV []byte -} - -// TicketKeyManager is a manager for TicketKeys. It allows one to control the -// lifetime of tickets, causing renewals and expirations for keys that are -// created. Calls to the manager are serialized. -type TicketKeyManager interface { - // New should create a brand new TicketKey with a new name. - New() *TicketKey - - // Current should return a key that is still valid. - Current() *TicketKey - - // Lookup should return a key with the given name, or nil if no name - // exists. 
- Lookup(name TicketName) *TicketKey - - // Expired should return if the key with the given name is expired and - // should not be used any more. - Expired(name TicketName) bool - - // ShouldRenew should return if the key is still ok to use for the current - // session, but we should send a new key for the client. - ShouldRenew(name TicketName) bool -} - -// TicketStore descibes the encryption and authentication methods the tickets -// will use along with a key manager for generating and keeping track of the -// secrets. -type TicketStore struct { - CipherCtx TicketCipherCtx - DigestCtx TicketDigestCtx - Keys TicketKeyManager -} - -func (t *TicketStore) cipherEngine() *C.ENGINE { - if t.CipherCtx.Engine == nil { - return nil - } - return t.CipherCtx.Engine.e -} - -func (t *TicketStore) digestEngine() *C.ENGINE { - if t.DigestCtx.Engine == nil { - return nil - } - return t.DigestCtx.Engine.e -} - -const ( - // instruct to do a handshake - ticket_resp_requireHandshake = 0 - // crypto context is set up correctly - ticket_resp_sessionOk = 1 - // crypto context is ok, but the ticket should be reissued - ticket_resp_renewSession = 2 - // we had a problem that shouldn't fall back to doing a handshake - ticket_resp_error = -1 - - // asked to create session crypto context - ticket_req_newSession = 1 - // asked to load crypto context for a previous session - ticket_req_lookupSession = 0 -) - -//export go_ticket_key_cb_thunk -func go_ticket_key_cb_thunk(p unsafe.Pointer, s *C.SSL, key_name *C.uchar, - iv *C.uchar, cctx *C.EVP_CIPHER_CTX, hctx *C.HMAC_CTX, enc C.int) C.int { - - // no panic's allowed. it's super hard to guarantee any state at this point - // so just abort everything. - defer func() { - if err := recover(); err != nil { - logger.Critf("openssl: ticket key callback panic'd: %v", err) - os.Exit(1) - } - }() - - ctx := pointer.Restore(p).(*Ctx) - store := ctx.ticket_store - if store == nil { - // TODO(jeff): should this be an error condition? 
it doesn't make sense - // to be called if we don't have a store I believe, but that's probably - // not worth aborting the handshake which is what I believe returning - // an error would do. - return ticket_resp_requireHandshake - } - - ctx.ticket_store_mu.Lock() - defer ctx.ticket_store_mu.Unlock() - - switch enc { - case ticket_req_newSession: - key := store.Keys.Current() - if key == nil { - key = store.Keys.New() - if key == nil { - return ticket_resp_requireHandshake - } - } - - C.memcpy( - unsafe.Pointer(key_name), - unsafe.Pointer(&key.Name[0]), - KeyNameSize) - C.EVP_EncryptInit_ex( - cctx, - store.CipherCtx.Cipher.ptr, - store.cipherEngine(), - (*C.uchar)(&key.CipherKey[0]), - (*C.uchar)(&key.IV[0])) - C.HMAC_Init_ex( - hctx, - unsafe.Pointer(&key.HMACKey[0]), - C.int(len(key.HMACKey)), - store.DigestCtx.Digest.ptr, - store.digestEngine()) - - return ticket_resp_sessionOk - - case ticket_req_lookupSession: - var name TicketName - C.memcpy( - unsafe.Pointer(&name[0]), - unsafe.Pointer(key_name), - KeyNameSize) - - key := store.Keys.Lookup(name) - if key == nil { - return ticket_resp_requireHandshake - } - if store.Keys.Expired(name) { - return ticket_resp_requireHandshake - } - - C.EVP_DecryptInit_ex( - cctx, - store.CipherCtx.Cipher.ptr, - store.cipherEngine(), - (*C.uchar)(&key.CipherKey[0]), - (*C.uchar)(&key.IV[0])) - C.HMAC_Init_ex( - hctx, - unsafe.Pointer(&key.HMACKey[0]), - C.int(len(key.HMACKey)), - store.DigestCtx.Digest.ptr, - store.digestEngine()) - - if store.Keys.ShouldRenew(name) { - return ticket_resp_renewSession - } - - return ticket_resp_sessionOk - - default: - return ticket_resp_error - } -} - -// SetTicketStore sets the ticket store for the context so that clients can do -// ticket based session resumption. 
If the store is nil, the -func (c *Ctx) SetTicketStore(store *TicketStore) { - c.ticket_store = store - - if store == nil { - C.X_SSL_CTX_set_tlsext_ticket_key_cb(c.ctx, nil) - } else { - C.X_SSL_CTX_set_tlsext_ticket_key_cb(c.ctx, - (*[0]byte)(C.X_SSL_CTX_ticket_key_cb)) - } -} diff --git a/vendor/github.com/libp2p/go-openssl/utils/errors.go b/vendor/github.com/libp2p/go-openssl/utils/errors.go deleted file mode 100644 index bab314c9..00000000 --- a/vendor/github.com/libp2p/go-openssl/utils/errors.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "errors" - "strings" -) - -// ErrorGroup collates errors -type ErrorGroup struct { - Errors []error -} - -// Add adds an error to an existing error group -func (e *ErrorGroup) Add(err error) { - if err != nil { - e.Errors = append(e.Errors, err) - } -} - -// Finalize returns an error corresponding to the ErrorGroup state. If there's -// no errors in the group, finalize returns nil. If there's only one error, -// Finalize returns that error. Otherwise, Finalize will make a new error -// consisting of the messages from the constituent errors. 
-func (e *ErrorGroup) Finalize() error { - if len(e.Errors) == 0 { - return nil - } - if len(e.Errors) == 1 { - return e.Errors[0] - } - msgs := make([]string, 0, len(e.Errors)) - for _, err := range e.Errors { - msgs = append(msgs, err.Error()) - } - return errors.New(strings.Join(msgs, "\n")) -} diff --git a/vendor/github.com/libp2p/go-openssl/utils/future.go b/vendor/github.com/libp2p/go-openssl/utils/future.go deleted file mode 100644 index df2d8312..00000000 --- a/vendor/github.com/libp2p/go-openssl/utils/future.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package utils - -import ( - "sync" -) - -// Future is a type that is essentially the inverse of a channel. With a -// channel, you have multiple senders and one receiver. With a future, you can -// have multiple receivers and one sender. Additionally, a future protects -// against double-sends. Since this is usually used for returning function -// results, we also capture and return error values as well. Use NewFuture -// to initialize. -type Future struct { - mutex *sync.Mutex - cond *sync.Cond - received bool - val interface{} - err error -} - -// NewFuture returns an initialized and ready Future. -func NewFuture() *Future { - mutex := &sync.Mutex{} - return &Future{ - mutex: mutex, - cond: sync.NewCond(mutex), - received: false, - val: nil, - err: nil, - } -} - -// Get blocks until the Future has a value set. 
-func (f *Future) Get() (interface{}, error) { - f.mutex.Lock() - defer f.mutex.Unlock() - for { - if f.received { - return f.val, f.err - } - f.cond.Wait() - } -} - -// Fired returns whether or not a value has been set. If Fired is true, Get -// won't block. -func (f *Future) Fired() bool { - f.mutex.Lock() - defer f.mutex.Unlock() - return f.received -} - -// Set provides the value to present and future Get calls. If Set has already -// been called, this is a no-op. -func (f *Future) Set(val interface{}, err error) { - f.mutex.Lock() - defer f.mutex.Unlock() - if f.received { - return - } - f.received = true - f.val = val - f.err = err - f.cond.Broadcast() -} diff --git a/vendor/github.com/libp2p/go-openssl/version.json b/vendor/github.com/libp2p/go-openssl/version.json deleted file mode 100644 index 557859c5..00000000 --- a/vendor/github.com/libp2p/go-openssl/version.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "version": "v0.1.0" -} diff --git a/vendor/github.com/lucas-clemente/quic-go/README.md b/vendor/github.com/lucas-clemente/quic-go/README.md deleted file mode 100644 index 5efb4f43..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# A QUIC implementation in pure Go - - - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/lucas-clemente/quic-go)](https://pkg.go.dev/github.com/lucas-clemente/quic-go) -[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/) - -quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) and Datagram Packetization Layer Path MTU - Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)). 
It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)). - -In addition to the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem. - -## Guides - -*We currently support Go 1.18.x and Go 1.19.x.* - -Running tests: - - go test ./... - -### QUIC without HTTP/3 - -Take a look at [this echo example](example/echo/echo.go). - -## Usage - -### As a server - -See the [example server](example/main.go). Starting a QUIC server is very similar to the standard lib http in go: - -```go -http.Handle("/", http.FileServer(http.Dir(wwwDir))) -http3.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil) -``` - -### As a client - -See the [example client](example/client/main.go). Use a `http3.RoundTripper` as a `Transport` in a `http.Client`. 
- -```go -http.Client{ - Transport: &http3.RoundTripper{}, -} -``` - -## Projects using quic-go - -| Project | Description | Stars | -|------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------| -| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) | -| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) | -| [go-ipfs](https://github.com/ipfs/go-ipfs) | IPFS implementation in go | ![GitHub Repo stars](https://img.shields.io/github/stars/ipfs/go-ipfs?style=flat-square) | -| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | -| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | -| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) | -| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) | -| [OONI Probe](https://github.com/ooni/probe-cli) | The Open Observatory of Network Interference (OONI) aims to empower decentralized efforts in documenting Internet censorship around the world. 
| ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | -| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) | - -## Contributing - -We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go deleted file mode 100644 index aed6038d..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/frame.go +++ /dev/null @@ -1,9 +0,0 @@ -package ackhandler - -import "github.com/lucas-clemente/quic-go/internal/wire" - -type Frame struct { - wire.Frame // nil if the frame has already been acknowledged in another packet - OnLost func(wire.Frame) - OnAcked func(wire.Frame) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go deleted file mode 100644 index e957d253..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/mockgen.go +++ /dev/null @@ -1,3 +0,0 @@ -package ackhandler - -//go:generate sh -c "../../mockgen_private.sh ackhandler mock_sent_packet_tracker_test.go github.com/lucas-clemente/quic-go/internal/ackhandler sentPacketTracker" diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go deleted file mode 100644 index c7a8d13e..00000000 --- 
a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/mockgen.go +++ /dev/null @@ -1,3 +0,0 @@ -package handshake - -//go:generate sh -c "../../mockgen_private.sh handshake mock_handshake_runner_test.go github.com/lucas-clemente/quic-go/internal/handshake handshakeRunner" diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go deleted file mode 100644 index b7cb20c1..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/retry.go +++ /dev/null @@ -1,62 +0,0 @@ -package handshake - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -var ( - oldRetryAEAD cipher.AEAD // used for QUIC draft versions up to 34 - retryAEAD cipher.AEAD // used for QUIC draft-34 -) - -func init() { - oldRetryAEAD = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1}) - retryAEAD = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) -} - -func initAEAD(key [16]byte) cipher.AEAD { - aes, err := aes.NewCipher(key[:]) - if err != nil { - panic(err) - } - aead, err := cipher.NewGCM(aes) - if err != nil { - panic(err) - } - return aead -} - -var ( - retryBuf bytes.Buffer - retryMutex sync.Mutex - oldRetryNonce = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c} - retryNonce = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb} -) - -// GetRetryIntegrityTag calculates the integrity tag on a Retry packet -func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte { - retryMutex.Lock() - retryBuf.WriteByte(uint8(origDestConnID.Len())) - retryBuf.Write(origDestConnID.Bytes()) - retryBuf.Write(retry) - - var tag [16]byte - var sealed []byte - if version != 
protocol.Version1 { - sealed = oldRetryAEAD.Seal(tag[:0], oldRetryNonce[:], nil, retryBuf.Bytes()) - } else { - sealed = retryAEAD.Seal(tag[:0], retryNonce[:], nil, retryBuf.Bytes()) - } - if len(sealed) != 16 { - panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed))) - } - retryBuf.Reset() - retryMutex.Unlock() - return &tag -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go b/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go deleted file mode 100644 index f3553872..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go120.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build go1.20 - -package qtls - -var _ int = "The version of quic-go you're using can't be built on Go 1.20 yet. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions." diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go deleted file mode 100644 index cf464250..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go +++ /dev/null @@ -1,22 +0,0 @@ -package utils - -import "sync/atomic" - -// An AtomicBool is an atomic bool -type AtomicBool struct { - v int32 -} - -// Set sets the value -func (a *AtomicBool) Set(value bool) { - var n int32 - if value { - n = 1 - } - atomic.StoreInt32(&a.v, n) -} - -// Get gets the value -func (a *AtomicBool) Get() bool { - return atomic.LoadInt32(&a.v) != 0 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/linkedlist/README.md b/vendor/github.com/lucas-clemente/quic-go/internal/utils/linkedlist/README.md deleted file mode 100644 index 2915d601..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/linkedlist/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Usage - -This is the Go standard library implementation of a linked list -(https://golang.org/src/container/list/list.go), modified 
to use Go generics. diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go deleted file mode 100644 index 9d9edab2..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go +++ /dev/null @@ -1,249 +0,0 @@ -package wire - -import ( - "bytes" - "errors" - "fmt" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/quicvarint" -) - -// ErrInvalidReservedBits is returned when the reserved bits are incorrect. -// When this error is returned, parsing continues, and an ExtendedHeader is returned. -// This is necessary because we need to decrypt the packet in that case, -// in order to avoid a timing side-channel. -var ErrInvalidReservedBits = errors.New("invalid reserved bits") - -// ExtendedHeader is the header of a QUIC packet. -type ExtendedHeader struct { - Header - - typeByte byte - - KeyPhase protocol.KeyPhaseBit - - PacketNumberLen protocol.PacketNumberLen - PacketNumber protocol.PacketNumber - - parsedLen protocol.ByteCount -} - -func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool /* reserved bits valid */, error) { - startLen := b.Len() - // read the (now unencrypted) first byte - var err error - h.typeByte, err = b.ReadByte() - if err != nil { - return false, err - } - if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil { - return false, err - } - var reservedBitsValid bool - if h.IsLongHeader { - reservedBitsValid, err = h.parseLongHeader(b, v) - } else { - reservedBitsValid, err = h.parseShortHeader(b, v) - } - if err != nil { - return false, err - } - h.parsedLen = protocol.ByteCount(startLen - b.Len()) - return reservedBitsValid, err -} - -func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) { - if 
err := h.readPacketNumber(b); err != nil { - return false, err - } - if h.typeByte&0xc != 0 { - return false, nil - } - return true, nil -} - -func (h *ExtendedHeader) parseShortHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) { - h.KeyPhase = protocol.KeyPhaseZero - if h.typeByte&0x4 > 0 { - h.KeyPhase = protocol.KeyPhaseOne - } - - if err := h.readPacketNumber(b); err != nil { - return false, err - } - if h.typeByte&0x18 != 0 { - return false, nil - } - return true, nil -} - -func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error { - h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1 - switch h.PacketNumberLen { - case protocol.PacketNumberLen1: - n, err := b.ReadByte() - if err != nil { - return err - } - h.PacketNumber = protocol.PacketNumber(n) - case protocol.PacketNumberLen2: - n, err := utils.BigEndian.ReadUint16(b) - if err != nil { - return err - } - h.PacketNumber = protocol.PacketNumber(n) - case protocol.PacketNumberLen3: - n, err := utils.BigEndian.ReadUint24(b) - if err != nil { - return err - } - h.PacketNumber = protocol.PacketNumber(n) - case protocol.PacketNumberLen4: - n, err := utils.BigEndian.ReadUint32(b) - if err != nil { - return err - } - h.PacketNumber = protocol.PacketNumber(n) - default: - return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen) - } - return nil -} - -// Write writes the Header. 
-func (h *ExtendedHeader) Write(b *bytes.Buffer, ver protocol.VersionNumber) error { - if h.DestConnectionID.Len() > protocol.MaxConnIDLen { - return fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len()) - } - if h.SrcConnectionID.Len() > protocol.MaxConnIDLen { - return fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len()) - } - if h.IsLongHeader { - return h.writeLongHeader(b, ver) - } - return h.writeShortHeader(b, ver) -} - -func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.VersionNumber) error { - var packetType uint8 - if version == protocol.Version2 { - //nolint:exhaustive - switch h.Type { - case protocol.PacketTypeInitial: - packetType = 0b01 - case protocol.PacketType0RTT: - packetType = 0b10 - case protocol.PacketTypeHandshake: - packetType = 0b11 - case protocol.PacketTypeRetry: - packetType = 0b00 - } - } else { - //nolint:exhaustive - switch h.Type { - case protocol.PacketTypeInitial: - packetType = 0b00 - case protocol.PacketType0RTT: - packetType = 0b01 - case protocol.PacketTypeHandshake: - packetType = 0b10 - case protocol.PacketTypeRetry: - packetType = 0b11 - } - } - firstByte := 0xc0 | packetType<<4 - if h.Type != protocol.PacketTypeRetry { - // Retry packets don't have a packet number - firstByte |= uint8(h.PacketNumberLen - 1) - } - - b.WriteByte(firstByte) - utils.BigEndian.WriteUint32(b, uint32(h.Version)) - b.WriteByte(uint8(h.DestConnectionID.Len())) - b.Write(h.DestConnectionID.Bytes()) - b.WriteByte(uint8(h.SrcConnectionID.Len())) - b.Write(h.SrcConnectionID.Bytes()) - - //nolint:exhaustive - switch h.Type { - case protocol.PacketTypeRetry: - b.Write(h.Token) - return nil - case protocol.PacketTypeInitial: - quicvarint.Write(b, uint64(len(h.Token))) - b.Write(h.Token) - } - quicvarint.WriteWithLen(b, uint64(h.Length), 2) - return h.writePacketNumber(b) -} - -func (h *ExtendedHeader) writeShortHeader(b *bytes.Buffer, _ protocol.VersionNumber) error { - typeByte := 
0x40 | uint8(h.PacketNumberLen-1) - if h.KeyPhase == protocol.KeyPhaseOne { - typeByte |= byte(1 << 2) - } - - b.WriteByte(typeByte) - b.Write(h.DestConnectionID.Bytes()) - return h.writePacketNumber(b) -} - -func (h *ExtendedHeader) writePacketNumber(b *bytes.Buffer) error { - switch h.PacketNumberLen { - case protocol.PacketNumberLen1: - b.WriteByte(uint8(h.PacketNumber)) - case protocol.PacketNumberLen2: - utils.BigEndian.WriteUint16(b, uint16(h.PacketNumber)) - case protocol.PacketNumberLen3: - utils.BigEndian.WriteUint24(b, uint32(h.PacketNumber)) - case protocol.PacketNumberLen4: - utils.BigEndian.WriteUint32(b, uint32(h.PacketNumber)) - default: - return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen) - } - return nil -} - -// ParsedLen returns the number of bytes that were consumed when parsing the header -func (h *ExtendedHeader) ParsedLen() protocol.ByteCount { - return h.parsedLen -} - -// GetLength determines the length of the Header. -func (h *ExtendedHeader) GetLength(v protocol.VersionNumber) protocol.ByteCount { - if h.IsLongHeader { - length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */ - if h.Type == protocol.PacketTypeInitial { - length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token)) - } - return length - } - - length := protocol.ByteCount(1 /* type byte */ + h.DestConnectionID.Len()) - length += protocol.ByteCount(h.PacketNumberLen) - return length -} - -// Log logs the Header -func (h *ExtendedHeader) Log(logger utils.Logger) { - if h.IsLongHeader { - var token string - if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry { - if len(h.Token) == 0 { - token = "Token: (empty), " - } else { - token = fmt.Sprintf("Token: %#x, ", h.Token) - } - if h.Type == protocol.PacketTypeRetry { - 
logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version) - return - } - } - logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version) - } else { - logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", h.DestConnectionID, h.PacketNumber, h.PacketNumberLen, h.KeyPhase) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go deleted file mode 100644 index a71871be..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/logging/mockgen.go +++ /dev/null @@ -1,4 +0,0 @@ -package logging - -//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/lucas-clemente/quic-go/logging -destination mock_connection_tracer_test.go github.com/lucas-clemente/quic-go/logging ConnectionTracer" -//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/lucas-clemente/quic-go/logging -destination mock_tracer_test.go github.com/lucas-clemente/quic-go/logging Tracer" diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go b/vendor/github.com/lucas-clemente/quic-go/packet_packer.go deleted file mode 100644 index 378e5766..00000000 --- a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go +++ /dev/null @@ -1,827 +0,0 @@ -package quic - -import ( - "bytes" - "errors" - "fmt" - "net" - "time" - - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - 
"github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type packer interface { - PackCoalescedPacket(onlyAck bool) (*coalescedPacket, error) - PackPacket(onlyAck bool) (*packedPacket, error) - MaybePackProbePacket(protocol.EncryptionLevel) (*packedPacket, error) - PackConnectionClose(*qerr.TransportError) (*coalescedPacket, error) - PackApplicationClose(*qerr.ApplicationError) (*coalescedPacket, error) - - SetMaxPacketSize(protocol.ByteCount) - PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error) - - HandleTransportParameters(*wire.TransportParameters) - SetToken([]byte) -} - -type sealer interface { - handshake.LongHeaderSealer -} - -type payload struct { - frames []ackhandler.Frame - ack *wire.AckFrame - length protocol.ByteCount -} - -type packedPacket struct { - buffer *packetBuffer - *packetContents -} - -type packetContents struct { - header *wire.ExtendedHeader - ack *wire.AckFrame - frames []ackhandler.Frame - - length protocol.ByteCount - - isMTUProbePacket bool -} - -type coalescedPacket struct { - buffer *packetBuffer - packets []*packetContents -} - -func (p *packetContents) EncryptionLevel() protocol.EncryptionLevel { - if !p.header.IsLongHeader { - return protocol.Encryption1RTT - } - //nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data). 
- switch p.header.Type { - case protocol.PacketTypeInitial: - return protocol.EncryptionInitial - case protocol.PacketTypeHandshake: - return protocol.EncryptionHandshake - case protocol.PacketType0RTT: - return protocol.Encryption0RTT - default: - panic("can't determine encryption level") - } -} - -func (p *packetContents) IsAckEliciting() bool { - return ackhandler.HasAckElicitingFrames(p.frames) -} - -func (p *packetContents) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet { - largestAcked := protocol.InvalidPacketNumber - if p.ack != nil { - largestAcked = p.ack.LargestAcked() - } - encLevel := p.EncryptionLevel() - for i := range p.frames { - if p.frames[i].OnLost != nil { - continue - } - switch encLevel { - case protocol.EncryptionInitial: - p.frames[i].OnLost = q.AddInitial - case protocol.EncryptionHandshake: - p.frames[i].OnLost = q.AddHandshake - case protocol.Encryption0RTT, protocol.Encryption1RTT: - p.frames[i].OnLost = q.AddAppData - } - } - - ap := ackhandler.GetPacket() - ap.PacketNumber = p.header.PacketNumber - ap.LargestAcked = largestAcked - ap.Frames = p.frames - ap.Length = p.length - ap.EncryptionLevel = encLevel - ap.SendTime = now - ap.IsPathMTUProbePacket = p.isMTUProbePacket - return ap -} - -func getMaxPacketSize(addr net.Addr) protocol.ByteCount { - maxSize := protocol.ByteCount(protocol.MinInitialPacketSize) - // If this is not a UDP address, we don't know anything about the MTU. - // Use the minimum size of an Initial packet as the max packet size. 
- if udpAddr, ok := addr.(*net.UDPAddr); ok { - if utils.IsIPv4(udpAddr.IP) { - maxSize = protocol.InitialPacketSizeIPv4 - } else { - maxSize = protocol.InitialPacketSizeIPv6 - } - } - return maxSize -} - -type packetNumberManager interface { - PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) - PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber -} - -type sealingManager interface { - GetInitialSealer() (handshake.LongHeaderSealer, error) - GetHandshakeSealer() (handshake.LongHeaderSealer, error) - Get0RTTSealer() (handshake.LongHeaderSealer, error) - Get1RTTSealer() (handshake.ShortHeaderSealer, error) -} - -type frameSource interface { - HasData() bool - AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) -} - -type ackFrameSource interface { - GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame -} - -type packetPacker struct { - srcConnID protocol.ConnectionID - getDestConnID func() protocol.ConnectionID - - perspective protocol.Perspective - version protocol.VersionNumber - cryptoSetup sealingManager - - initialStream cryptoStream - handshakeStream cryptoStream - - token []byte - - pnManager packetNumberManager - framer frameSource - acks ackFrameSource - datagramQueue *datagramQueue - retransmissionQueue *retransmissionQueue - - maxPacketSize protocol.ByteCount - numNonAckElicitingAcks int -} - -var _ packer = &packetPacker{} - -func newPacketPacker( - srcConnID protocol.ConnectionID, - getDestConnID func() protocol.ConnectionID, - initialStream cryptoStream, - handshakeStream cryptoStream, - packetNumberManager packetNumberManager, - retransmissionQueue *retransmissionQueue, - remoteAddr net.Addr, // only used for determining the max packet size - cryptoSetup sealingManager, - framer frameSource, - acks ackFrameSource, - 
datagramQueue *datagramQueue, - perspective protocol.Perspective, - version protocol.VersionNumber, -) *packetPacker { - return &packetPacker{ - cryptoSetup: cryptoSetup, - getDestConnID: getDestConnID, - srcConnID: srcConnID, - initialStream: initialStream, - handshakeStream: handshakeStream, - retransmissionQueue: retransmissionQueue, - datagramQueue: datagramQueue, - perspective: perspective, - version: version, - framer: framer, - acks: acks, - pnManager: packetNumberManager, - maxPacketSize: getMaxPacketSize(remoteAddr), - } -} - -// PackConnectionClose packs a packet that closes the connection with a transport error. -func (p *packetPacker) PackConnectionClose(e *qerr.TransportError) (*coalescedPacket, error) { - var reason string - // don't send details of crypto errors - if !e.ErrorCode.IsCryptoError() { - reason = e.ErrorMessage - } - return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason) -} - -// PackApplicationClose packs a packet that closes the connection with an application error. 
-func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError) (*coalescedPacket, error) { - return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage) -} - -func (p *packetPacker) packConnectionClose( - isApplicationError bool, - errorCode uint64, - frameType uint64, - reason string, -) (*coalescedPacket, error) { - var sealers [4]sealer - var hdrs [4]*wire.ExtendedHeader - var payloads [4]*payload - var size protocol.ByteCount - var numPackets uint8 - encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT} - for i, encLevel := range encLevels { - if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT { - continue - } - ccf := &wire.ConnectionCloseFrame{ - IsApplicationError: isApplicationError, - ErrorCode: errorCode, - FrameType: frameType, - ReasonPhrase: reason, - } - // don't send application errors in Initial or Handshake packets - if isApplicationError && (encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake) { - ccf.IsApplicationError = false - ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode) - ccf.ReasonPhrase = "" - } - payload := &payload{ - frames: []ackhandler.Frame{{Frame: ccf}}, - length: ccf.Length(p.version), - } - - var sealer sealer - var err error - var keyPhase protocol.KeyPhaseBit // only set for 1-RTT - switch encLevel { - case protocol.EncryptionInitial: - sealer, err = p.cryptoSetup.GetInitialSealer() - case protocol.EncryptionHandshake: - sealer, err = p.cryptoSetup.GetHandshakeSealer() - case protocol.Encryption0RTT: - sealer, err = p.cryptoSetup.Get0RTTSealer() - case protocol.Encryption1RTT: - var s handshake.ShortHeaderSealer - s, err = p.cryptoSetup.Get1RTTSealer() - if err == nil { - keyPhase = s.KeyPhase() - } - sealer = s - } - if err == handshake.ErrKeysNotYetAvailable || err == handshake.ErrKeysDropped { - continue - } - if err != nil { - return 
nil, err - } - sealers[i] = sealer - var hdr *wire.ExtendedHeader - if encLevel == protocol.Encryption1RTT { - hdr = p.getShortHeader(keyPhase) - } else { - hdr = p.getLongHeader(encLevel) - } - hdrs[i] = hdr - payloads[i] = payload - size += p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead()) - numPackets++ - } - contents := make([]*packetContents, 0, numPackets) - buffer := getPacketBuffer() - for i, encLevel := range encLevels { - if sealers[i] == nil { - continue - } - var paddingLen protocol.ByteCount - if encLevel == protocol.EncryptionInitial { - paddingLen = p.initialPaddingLen(payloads[i].frames, size) - } - c, err := p.appendPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], false) - if err != nil { - return nil, err - } - contents = append(contents, c) - } - return &coalescedPacket{buffer: buffer, packets: contents}, nil -} - -// packetLength calculates the length of the serialized packet. -// It takes into account that packets that have a tiny payload need to be padded, -// such that len(payload) + packet number len >= 4 + AEAD overhead -func (p *packetPacker) packetLength(hdr *wire.ExtendedHeader, payload *payload) protocol.ByteCount { - var paddingLen protocol.ByteCount - pnLen := protocol.ByteCount(hdr.PacketNumberLen) - if payload.length < 4-pnLen { - paddingLen = 4 - pnLen - payload.length - } - return hdr.GetLength(p.version) + payload.length + paddingLen -} - -// size is the expected size of the packet, if no padding was applied. -func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount { - // For the server, only ack-eliciting Initial packets need to be padded. - if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) { - return 0 - } - if size >= p.maxPacketSize { - return 0 - } - return p.maxPacketSize - size -} - -// PackCoalescedPacket packs a new packet. 
-// It packs an Initial / Handshake if there is data to send in these packet number spaces. -// It should only be called before the handshake is confirmed. -func (p *packetPacker) PackCoalescedPacket(onlyAck bool) (*coalescedPacket, error) { - maxPacketSize := p.maxPacketSize - if p.perspective == protocol.PerspectiveClient { - maxPacketSize = protocol.MinInitialPacketSize - } - var initialHdr, handshakeHdr, appDataHdr *wire.ExtendedHeader - var initialPayload, handshakePayload, appDataPayload *payload - var numPackets int - // Try packing an Initial packet. - initialSealer, err := p.cryptoSetup.GetInitialSealer() - if err != nil && err != handshake.ErrKeysDropped { - return nil, err - } - var size protocol.ByteCount - if initialSealer != nil { - initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true) - if initialPayload != nil { - size += p.packetLength(initialHdr, initialPayload) + protocol.ByteCount(initialSealer.Overhead()) - numPackets++ - } - } - - // Add a Handshake packet. - var handshakeSealer sealer - if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { - var err error - handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer() - if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { - return nil, err - } - if handshakeSealer != nil { - handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0) - if handshakePayload != nil { - s := p.packetLength(handshakeHdr, handshakePayload) + protocol.ByteCount(handshakeSealer.Overhead()) - size += s - numPackets++ - } - } - } - - // Add a 0-RTT / 1-RTT packet. 
- var appDataSealer sealer - appDataEncLevel := protocol.Encryption1RTT - if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { - var sErr error - var oneRTTSealer handshake.ShortHeaderSealer - oneRTTSealer, sErr = p.cryptoSetup.Get1RTTSealer() - appDataSealer = oneRTTSealer - if sErr != nil && p.perspective == protocol.PerspectiveClient { - appDataSealer, sErr = p.cryptoSetup.Get0RTTSealer() - appDataEncLevel = protocol.Encryption0RTT - } - if appDataSealer != nil && sErr == nil { - //nolint:exhaustive // 0-RTT and 1-RTT are the only two application data encryption levels. - switch appDataEncLevel { - case protocol.Encryption0RTT: - appDataHdr, appDataPayload = p.maybeGetAppDataPacketFor0RTT(appDataSealer, maxPacketSize-size) - case protocol.Encryption1RTT: - appDataHdr, appDataPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, maxPacketSize-size, onlyAck, size == 0) - } - if appDataHdr != nil && appDataPayload != nil { - size += p.packetLength(appDataHdr, appDataPayload) + protocol.ByteCount(appDataSealer.Overhead()) - numPackets++ - } - } - } - - if numPackets == 0 { - return nil, nil - } - - buffer := getPacketBuffer() - packet := &coalescedPacket{ - buffer: buffer, - packets: make([]*packetContents, 0, numPackets), - } - if initialPayload != nil { - padding := p.initialPaddingLen(initialPayload.frames, size) - cont, err := p.appendPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, false) - if err != nil { - return nil, err - } - packet.packets = append(packet.packets, cont) - } - if handshakePayload != nil { - cont, err := p.appendPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, false) - if err != nil { - return nil, err - } - packet.packets = append(packet.packets, cont) - } - if appDataPayload != nil { - cont, err := p.appendPacket(buffer, appDataHdr, appDataPayload, 0, appDataEncLevel, appDataSealer, false) - if err != nil 
{ - return nil, err - } - packet.packets = append(packet.packets, cont) - } - return packet, nil -} - -// PackPacket packs a packet in the application data packet number space. -// It should be called after the handshake is confirmed. -func (p *packetPacker) PackPacket(onlyAck bool) (*packedPacket, error) { - sealer, err := p.cryptoSetup.Get1RTTSealer() - if err != nil { - return nil, err - } - hdr, payload := p.maybeGetShortHeaderPacket(sealer, p.maxPacketSize, onlyAck, true) - if payload == nil { - return nil, nil - } - buffer := getPacketBuffer() - cont, err := p.appendPacket(buffer, hdr, payload, 0, protocol.Encryption1RTT, sealer, false) - if err != nil { - return nil, err - } - return &packedPacket{ - buffer: buffer, - packetContents: cont, - }, nil -} - -func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool) (*wire.ExtendedHeader, *payload) { - if onlyAck { - if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { - var payload payload - payload.ack = ack - payload.length = ack.Length(p.version) - return p.getLongHeader(encLevel), &payload - } - return nil, nil - } - - var s cryptoStream - var hasRetransmission bool - //nolint:exhaustive // Initial and Handshake are the only two encryption levels here. 
- switch encLevel { - case protocol.EncryptionInitial: - s = p.initialStream - hasRetransmission = p.retransmissionQueue.HasInitialData() - case protocol.EncryptionHandshake: - s = p.handshakeStream - hasRetransmission = p.retransmissionQueue.HasHandshakeData() - } - - hasData := s.HasData() - var ack *wire.AckFrame - if ackAllowed { - ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData) - } - if !hasData && !hasRetransmission && ack == nil { - // nothing to send - return nil, nil - } - - var payload payload - if ack != nil { - payload.ack = ack - payload.length = ack.Length(p.version) - maxPacketSize -= payload.length - } - hdr := p.getLongHeader(encLevel) - maxPacketSize -= hdr.GetLength(p.version) - if hasRetransmission { - for { - var f wire.Frame - //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s - switch encLevel { - case protocol.EncryptionInitial: - f = p.retransmissionQueue.GetInitialFrame(maxPacketSize) - case protocol.EncryptionHandshake: - f = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize) - } - if f == nil { - break - } - payload.frames = append(payload.frames, ackhandler.Frame{Frame: f}) - frameLen := f.Length(p.version) - payload.length += frameLen - maxPacketSize -= frameLen - } - } else if s.HasData() { - cf := s.PopCryptoFrame(maxPacketSize) - payload.frames = []ackhandler.Frame{{Frame: cf}} - payload.length += cf.Length(p.version) - } - return hdr, &payload -} - -func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount) (*wire.ExtendedHeader, *payload) { - if p.perspective != protocol.PerspectiveClient { - return nil, nil - } - - hdr := p.getLongHeader(protocol.Encryption0RTT) - maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead()) - payload := p.maybeGetAppDataPacket(maxPayloadSize, false, false) - return hdr, payload -} - -func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, 
maxPacketSize protocol.ByteCount, onlyAck, ackAllowed bool) (*wire.ExtendedHeader, *payload) { - hdr := p.getShortHeader(sealer.KeyPhase()) - maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead()) - payload := p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed) - return hdr, payload -} - -func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool) *payload { - payload := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed) - - // check if we have anything to send - if len(payload.frames) == 0 { - if payload.ack == nil { - return nil - } - // the packet only contains an ACK - if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks { - ping := &wire.PingFrame{} - // don't retransmit the PING frame when it is lost - payload.frames = append(payload.frames, ackhandler.Frame{Frame: ping, OnLost: func(wire.Frame) {}}) - payload.length += ping.Length(p.version) - p.numNonAckElicitingAcks = 0 - } else { - p.numNonAckElicitingAcks++ - } - } else { - p.numNonAckElicitingAcks = 0 - } - return payload -} - -func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool) *payload { - if onlyAck { - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { - payload := &payload{} - payload.ack = ack - payload.length += ack.Length(p.version) - return payload - } - return &payload{} - } - - payload := &payload{frames: make([]ackhandler.Frame, 0, 1)} - - hasData := p.framer.HasData() - hasRetransmission := p.retransmissionQueue.HasAppData() - - var hasAck bool - if ackAllowed { - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { - payload.ack = ack - payload.length += ack.Length(p.version) - hasAck = true - } - } - - if p.datagramQueue != nil { - if f := p.datagramQueue.Peek(); f != nil { - size := f.Length(p.version) - if size <= maxFrameSize-payload.length { - 
payload.frames = append(payload.frames, ackhandler.Frame{ - Frame: f, - // set it to a no-op. Then we won't set the default callback, which would retransmit the frame. - OnLost: func(wire.Frame) {}, - }) - payload.length += size - p.datagramQueue.Pop() - } - } - } - - if hasAck && !hasData && !hasRetransmission { - return payload - } - - if hasRetransmission { - for { - remainingLen := maxFrameSize - payload.length - if remainingLen < protocol.MinStreamFrameSize { - break - } - f := p.retransmissionQueue.GetAppDataFrame(remainingLen) - if f == nil { - break - } - payload.frames = append(payload.frames, ackhandler.Frame{Frame: f}) - payload.length += f.Length(p.version) - } - } - - if hasData { - var lengthAdded protocol.ByteCount - payload.frames, lengthAdded = p.framer.AppendControlFrames(payload.frames, maxFrameSize-payload.length) - payload.length += lengthAdded - - payload.frames, lengthAdded = p.framer.AppendStreamFrames(payload.frames, maxFrameSize-payload.length) - payload.length += lengthAdded - } - return payload -} - -func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) (*packedPacket, error) { - var hdr *wire.ExtendedHeader - var payload *payload - var sealer sealer - //nolint:exhaustive // Probe packets are never sent for 0-RTT. 
- switch encLevel { - case protocol.EncryptionInitial: - var err error - sealer, err = p.cryptoSetup.GetInitialSealer() - if err != nil { - return nil, err - } - hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true) - case protocol.EncryptionHandshake: - var err error - sealer, err = p.cryptoSetup.GetHandshakeSealer() - if err != nil { - return nil, err - } - hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true) - case protocol.Encryption1RTT: - oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer() - if err != nil { - return nil, err - } - sealer = oneRTTSealer - hdr = p.getShortHeader(oneRTTSealer.KeyPhase()) - payload = p.maybeGetAppDataPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead())-hdr.GetLength(p.version), false, true) - default: - panic("unknown encryption level") - } - if payload == nil { - return nil, nil - } - size := p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead()) - var padding protocol.ByteCount - if encLevel == protocol.EncryptionInitial { - padding = p.initialPaddingLen(payload.frames, size) - } - buffer := getPacketBuffer() - cont, err := p.appendPacket(buffer, hdr, payload, padding, encLevel, sealer, false) - if err != nil { - return nil, err - } - return &packedPacket{ - buffer: buffer, - packetContents: cont, - }, nil -} - -func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error) { - payload := &payload{ - frames: []ackhandler.Frame{ping}, - length: ping.Length(p.version), - } - buffer := getPacketBuffer() - sealer, err := p.cryptoSetup.Get1RTTSealer() - if err != nil { - return nil, err - } - hdr := p.getShortHeader(sealer.KeyPhase()) - padding := size - p.packetLength(hdr, payload) - protocol.ByteCount(sealer.Overhead()) - contents, err := p.appendPacket(buffer, hdr, payload, padding, 
protocol.Encryption1RTT, sealer, true) - if err != nil { - return nil, err - } - contents.isMTUProbePacket = true - return &packedPacket{ - buffer: buffer, - packetContents: contents, - }, nil -} - -func (p *packetPacker) getShortHeader(kp protocol.KeyPhaseBit) *wire.ExtendedHeader { - pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) - hdr := &wire.ExtendedHeader{} - hdr.PacketNumber = pn - hdr.PacketNumberLen = pnLen - hdr.DestConnectionID = p.getDestConnID() - hdr.KeyPhase = kp - return hdr -} - -func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel) *wire.ExtendedHeader { - pn, pnLen := p.pnManager.PeekPacketNumber(encLevel) - hdr := &wire.ExtendedHeader{ - PacketNumber: pn, - PacketNumberLen: pnLen, - } - hdr.IsLongHeader = true - hdr.Version = p.version - hdr.SrcConnectionID = p.srcConnID - hdr.DestConnectionID = p.getDestConnID() - - //nolint:exhaustive // 1-RTT packets are not long header packets. - switch encLevel { - case protocol.EncryptionInitial: - hdr.Type = protocol.PacketTypeInitial - hdr.Token = p.token - case protocol.EncryptionHandshake: - hdr.Type = protocol.PacketTypeHandshake - case protocol.Encryption0RTT: - hdr.Type = protocol.PacketType0RTT - } - return hdr -} - -func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedHeader, payload *payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, isMTUProbePacket bool) (*packetContents, error) { - var paddingLen protocol.ByteCount - pnLen := protocol.ByteCount(header.PacketNumberLen) - if payload.length < 4-pnLen { - paddingLen = 4 - pnLen - payload.length - } - paddingLen += padding - if header.IsLongHeader { - header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + payload.length + paddingLen - } - - hdrOffset := buffer.Len() - buf := bytes.NewBuffer(buffer.Data) - if err := header.Write(buf, p.version); err != nil { - return nil, err - } - payloadOffset := buf.Len() - raw := 
buffer.Data[:payloadOffset] - - if payload.ack != nil { - var err error - raw, err = payload.ack.Append(raw, p.version) - if err != nil { - return nil, err - } - } - if paddingLen > 0 { - raw = append(raw, make([]byte, paddingLen)...) - } - for _, frame := range payload.frames { - var err error - raw, err = frame.Append(raw, p.version) - if err != nil { - return nil, err - } - } - - if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != payload.length { - return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", payload.length, payloadSize) - } - if !isMTUProbePacket { - if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > p.maxPacketSize { - return nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize) - } - } - - // encrypt the packet - _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], header.PacketNumber, raw[hdrOffset:payloadOffset]) - raw = raw[0 : len(raw)+sealer.Overhead()] - // apply header protection - pnOffset := payloadOffset - int(header.PacketNumberLen) - sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[hdrOffset], raw[pnOffset:payloadOffset]) - buffer.Data = raw - - num := p.pnManager.PopPacketNumber(encLevel) - if num != header.PacketNumber { - return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") - } - return &packetContents{ - header: header, - ack: payload.ack, - frames: payload.frames, - length: buffer.Len() - hdrOffset, - }, nil -} - -func (p *packetPacker) SetToken(token []byte) { - p.token = token -} - -// When a higher MTU is discovered, use it. -func (p *packetPacker) SetMaxPacketSize(s protocol.ByteCount) { - p.maxPacketSize = s -} - -// If the peer sets a max_packet_size that's smaller than the size we're currently using, -// we need to reduce the size of packets we send. 
-func (p *packetPacker) HandleTransportParameters(params *wire.TransportParameters) { - if params.MaxUDPPayloadSize != 0 { - p.maxPacketSize = utils.Min(p.maxPacketSize, params.MaxUDPPayloadSize) - } -} diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/README.md b/vendor/github.com/marten-seemann/qtls-go1-18/README.md deleted file mode 100644 index 3e902212..00000000 --- a/vendor/github.com/marten-seemann/qtls-go1-18/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# qtls - -[![Go Reference](https://pkg.go.dev/badge/github.com/marten-seemann/qtls-go1-17.svg)](https://pkg.go.dev/github.com/marten-seemann/qtls-go1-17) -[![.github/workflows/go-test.yml](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml/badge.svg)](https://github.com/marten-seemann/qtls-go1-17/actions/workflows/go-test.yml) - -This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go). diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/README.md b/vendor/github.com/marten-seemann/qtls-go1-19/README.md deleted file mode 100644 index db260ba2..00000000 --- a/vendor/github.com/marten-seemann/qtls-go1-19/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# qtls - -[![Go Reference](https://pkg.go.dev/badge/github.com/marten-seemann/qtls-go1-19.svg)](https://pkg.go.dev/github.com/marten-seemann/qtls-go1-19) -[![.github/workflows/go-test.yml](https://github.com/marten-seemann/qtls-go1-19/actions/workflows/go-test.yml/badge.svg)](https://github.com/marten-seemann/qtls-go1-19/actions/workflows/go-test.yml) - -This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go). 
diff --git a/vendor/github.com/mattn/go-pointer/README.md b/vendor/github.com/mattn/go-pointer/README.md deleted file mode 100644 index c74eee22..00000000 --- a/vendor/github.com/mattn/go-pointer/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# go-pointer - -Utility for cgo - -## Usage - -https://github.com/golang/proposal/blob/master/design/12416-cgo-pointers.md - -In go 1.6, cgo argument can't be passed Go pointer. - -``` -var s string -C.pass_pointer(pointer.Save(&s)) -v := *(pointer.Restore(C.get_from_pointer()).(*string)) -``` - -## Installation - -``` -go get github.com/mattn/go-pointer -``` - -## License - -MIT - -## Author - -Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-pointer/doc.go b/vendor/github.com/mattn/go-pointer/doc.go deleted file mode 100644 index c27bd8c0..00000000 --- a/vendor/github.com/mattn/go-pointer/doc.go +++ /dev/null @@ -1 +0,0 @@ -package pointer diff --git a/vendor/github.com/mattn/go-pointer/pointer.go b/vendor/github.com/mattn/go-pointer/pointer.go deleted file mode 100644 index 08a98533..00000000 --- a/vendor/github.com/mattn/go-pointer/pointer.go +++ /dev/null @@ -1,57 +0,0 @@ -package pointer - -// #include -import "C" -import ( - "sync" - "unsafe" -) - -var ( - mutex sync.RWMutex - store = map[unsafe.Pointer]interface{}{} -) - -func Save(v interface{}) unsafe.Pointer { - if v == nil { - return nil - } - - // Generate real fake C pointer. - // This pointer will not store any data, but will bi used for indexing purposes. - // Since Go doest allow to cast dangling pointer to unsafe.Pointer, we do rally allocate one byte. - // Why we need indexing, because Go doest allow C code to store pointers to Go data. 
- var ptr unsafe.Pointer = C.malloc(C.size_t(1)) - if ptr == nil { - panic("can't allocate 'cgo-pointer hack index pointer': ptr == nil") - } - - mutex.Lock() - store[ptr] = v - mutex.Unlock() - - return ptr -} - -func Restore(ptr unsafe.Pointer) (v interface{}) { - if ptr == nil { - return nil - } - - mutex.RLock() - v = store[ptr] - mutex.RUnlock() - return -} - -func Unref(ptr unsafe.Pointer) { - if ptr == nil { - return - } - - mutex.Lock() - delete(store, ptr) - mutex.Unlock() - - C.free(ptr) -} diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE index 55f12ab7..852ab9ce 100644 --- a/vendor/github.com/miekg/dns/LICENSE +++ b/vendor/github.com/miekg/dns/LICENSE @@ -1,30 +1,29 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +BSD 3-Clause License + +Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben. +All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. -As this is fork of the official Go code the same license applies. -Extensions of the original work are copyright (c) 2011 Miek Gieben +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 5a799d88..06bea9fa 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -77,6 +77,10 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://ping.sx/dig * https://fleetdeck.io/ * https://github.com/markdingo/autoreverse +* https://github.com/slackhq/nebula +* https://addr.tools/ +* https://dnscheck.tools/ +* https://github.com/egbakou/domainverifier Send pull request if you want to be listed here. @@ -140,6 +144,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY * 3597 - Unknown RRs +* 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions * 4255 - SSHFP record * 4343 - Case insensitivity @@ -175,6 +180,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. 
* 8080 - EdDSA for DNSSEC * 8499 - DNS Terminology * 8659 - DNS Certification Authority Authorization (CAA) Resource Record +* 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery * 8914 - Extended DNS Errors * 8976 - Message Digest for DNS Zones (ZONEMD RR) diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go index ac479db9..ab2812e3 100644 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -19,7 +19,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction // * has more than 0 RRs in the Authority section // // * has more than 2 RRs in the Additional section -// var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc // MsgAcceptAction represents the action to be taken. diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index 9aa65853..9051ae00 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -106,7 +106,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) { } // DialContext connects to the address on the named network, with a context.Context. -// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released. func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) { // create a new dialer with the appropriate timeout var d net.Dialer @@ -127,15 +126,11 @@ func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, e if useTLS { network = strings.TrimSuffix(network, "-tls") - // TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases. 
- /* - tlsDialer := tls.Dialer{ - NetDialer: &d, - Config: c.TLSConfig, - } - conn.Conn, err = tlsDialer.DialContext(ctx, network, address) - */ - conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + tlsDialer := tls.Dialer{ + NetDialer: &d, + Config: c.TLSConfig, + } + conn.Conn, err = tlsDialer.DialContext(ctx, network, address) } else { conn.Conn, err = d.DialContext(ctx, network, address) } @@ -185,7 +180,7 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er // that entails when using "tcp" and especially "tcp-tls" clients. // // When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to -// prevent one cancelation from canceling all outstanding requests. +// prevent one cancellation from canceling all outstanding requests. func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { return c.exchangeWithConnContext(context.Background(), m, conn) } @@ -198,7 +193,7 @@ func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn q := m.Question[0] key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { - // When we're doing singleflight we don't want one context cancelation, cancel _all_ outstanding queries. + // When we're doing singleflight we don't want one context cancellation, cancel _all_ outstanding queries. // Hence we ignore the context and use Background(). 
return c.exchangeContext(context.Background(), m, conn) }) @@ -431,7 +426,6 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) // co.WriteMsg(m) // in, _ := co.ReadMsg() // co.Close() -// func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { println("dns: ExchangeConn: this function is deprecated") co := new(Conn) diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go index e11b630d..d00ac62f 100644 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -68,7 +68,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { } case "search": // set search path to given servers - c.Search = append([]string(nil), f[1:]...) + c.Search = cloneSlice(f[1:]) case "options": // magic options for _, s := range f[1:] { diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index f2cdbf43..75b17f0c 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -208,7 +208,7 @@ func IsDomainName(s string) (labels int, ok bool) { } // check for \DDD - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + if isDDD(s[i+1:]) { i += 3 begin += 3 } else { diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index ea01aa81..1be87eae 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -128,10 +128,6 @@ type dnskeyWireFmt struct { /* Nothing is left out */ } -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - // KeyTag calculates the keytag (or key-id) of the DNSKEY. 
func (k *DNSKEY) KeyTag() uint16 { if k == nil { @@ -417,11 +413,11 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { return err } - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(miek) - // remove the domain name and assume its ours? - } + sigbuf := rr.sigBuf() // Get the binary signature data + // TODO(miek) + // remove the domain name and assume its ours? + // if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // } h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) if err != nil { diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go index f00f5722..586ab691 100644 --- a/vendor/github.com/miekg/dns/doc.go +++ b/vendor/github.com/miekg/dns/doc.go @@ -13,28 +13,28 @@ names in a message will result in a packing failure. Resource records are native types. They are not stored in wire format. Basic usage pattern for creating a new resource record: - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." Or directly from a string: - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") Or when the default origin (.) and TTL (3600) and class (IN) suit you: - mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") Or even: - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") In the DNS messages are exchanged, these messages contain resource records (sets). 
Use pattern for creating a message: - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) Or when not certain if the domain name is fully qualified: @@ -45,17 +45,17 @@ records for the miek.nl. zone. The following is slightly more verbose, but more flexible: - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} After creating a message it can be sent. Basic use pattern for synchronous querying the DNS at a server configured on 127.0.0.1 and port 53: - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") Suppressing multiple outstanding queries (with the same question, type and class) is as easy as setting: @@ -72,7 +72,7 @@ and port to use for the connection: Port: 12345, Zone: "", } - c.Dialer := &net.Dialer{ + c.Dialer = &net.Dialer{ Timeout: 200 * time.Millisecond, LocalAddr: &laddr, } @@ -96,7 +96,7 @@ the Answer section: // do something with t.Txt } -Domain Name and TXT Character String Representations +# Domain Name and TXT Character String Representations Both domain names and TXT character strings are converted to presentation form both when unpacked and when converted to strings. @@ -108,7 +108,7 @@ be escaped. Bytes below 32 and above 127 will be converted to \DDD form. For domain names, in addition to the above rules brackets, periods, spaces, semicolons and the at symbol are escaped. -DNSSEC +# DNSSEC DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses public key cryptography to sign resource records. 
The public keys are stored in @@ -117,12 +117,12 @@ DNSKEY records and the signatures in RRSIG records. Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit to a request. - m := new(dns.Msg) - m.SetEdns0(4096, true) + m := new(dns.Msg) + m.SetEdns0(4096, true) Signature generation, signature verification and key generation are all supported. -DYNAMIC UPDATES +# DYNAMIC UPDATES Dynamic updates reuses the DNS message format, but renames three of the sections. Question is Zone, Answer is Prerequisite, Authority is Update, only @@ -133,30 +133,30 @@ certain resource records or names in a zone to specify if resource records should be added or removed. The table from RFC 2136 supplemented with the Go DNS function shows which functions exist to specify the prerequisites. - 3.2.4 - Table Of Metavalues Used In Prerequisite Section + 3.2.4 - Table Of Metavalues Used In Prerequisite Section - CLASS TYPE RDATA Meaning Function - -------------------------------------------------------------- - ANY ANY empty Name is in use dns.NameUsed - ANY rrset empty RRset exists (value indep) dns.RRsetUsed - NONE ANY empty Name is not in use dns.NameNotUsed - NONE rrset empty RRset does not exist dns.RRsetNotUsed - zone rrset rr RRset exists (value dep) dns.Used + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used The prerequisite section can also be left empty. If you have decided on the prerequisites you can tell what RRs should be added or deleted. The next table shows the options you have and what functions to call. 
- 3.4.2.6 - Table Of Metavalues Used In Update Section + 3.4.2.6 - Table Of Metavalues Used In Update Section - CLASS TYPE RDATA Meaning Function - --------------------------------------------------------------- - ANY ANY empty Delete all RRsets from name dns.RemoveName - ANY rrset empty Delete an RRset dns.RemoveRRset - NONE rrset rr Delete an RR from RRset dns.Remove - zone rrset rr Add to an RRset dns.Insert + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert -TRANSACTION SIGNATURE +# TRANSACTION SIGNATURE An TSIG or transaction signature adds a HMAC TSIG record to each message sent. The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512. @@ -239,7 +239,7 @@ Basic use pattern validating and replying to a message that has TSIG set. w.WriteMsg(m) } -PRIVATE RRS +# PRIVATE RRS RFC 6895 sets aside a range of type codes for private use. This range is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these @@ -248,7 +248,7 @@ can be used, before requesting an official type code from IANA. See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more information. -EDNS0 +# EDNS0 EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by RFC 6891. It defines a new RR type, the OPT RR, which is then completely @@ -279,9 +279,9 @@ SIG(0) From RFC 2931: - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. + SIG(0) provides protection for DNS transactions and requests .... + ... 
protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared secret approach in TSIG. Supported algorithms: ECDSAP256SHA256, diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 14568c2e..b5bdac81 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -78,7 +78,10 @@ func (rr *OPT) String() string { if rr.Do() { s += "flags: do; " } else { - s += "flags: ; " + s += "flags:; " + } + if rr.Hdr.Ttl&0x7FFF != 0 { + s += fmt.Sprintf("MBZ: 0x%04x, ", rr.Hdr.Ttl&0x7FFF) } s += "udp: " + strconv.Itoa(int(rr.UDPSize())) @@ -98,6 +101,8 @@ func (rr *OPT) String() string { s += "\n; SUBNET: " + o.String() case *EDNS0_COOKIE: s += "\n; COOKIE: " + o.String() + case *EDNS0_EXPIRE: + s += "\n; EXPIRE: " + o.String() case *EDNS0_TCP_KEEPALIVE: s += "\n; KEEPALIVE: " + o.String() case *EDNS0_UL: @@ -258,7 +263,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} // o.Hdr.Name = "." // o.Hdr.Rrtype = dns.TypeOPT // e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET +// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt) // e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 // e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 // e.SourceScope = 0 @@ -515,8 +520,8 @@ type EDNS0_DAU struct { // Option implements the EDNS0 interface. 
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_DAU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_DAU) String() string { s := "" @@ -539,8 +544,8 @@ type EDNS0_DHU struct { // Option implements the EDNS0 interface. func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_DHU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_DHU) String() string { s := "" @@ -563,8 +568,8 @@ type EDNS0_N3U struct { // Option implements the EDNS0 interface. func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_N3U) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_N3U) String() string { // Re-use the hash map @@ -641,30 +646,21 @@ type EDNS0_LOCAL struct { // Option implements the EDNS0 interface. 
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } + func (e *EDNS0_LOCAL) String() string { return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) } + func (e *EDNS0_LOCAL) copy() EDNS0 { - b := make([]byte, len(e.Data)) - copy(b, e.Data) - return &EDNS0_LOCAL{e.Code, b} + return &EDNS0_LOCAL{e.Code, cloneSlice(e.Data)} } func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil + return cloneSlice(e.Data), nil } func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } + e.Data = cloneSlice(b) return nil } @@ -727,14 +723,10 @@ type EDNS0_PADDING struct { // Option implements the EDNS0 interface. func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } -func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } -func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return cloneSlice(e.Padding), nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = cloneSlice(b); return nil } func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } -func (e *EDNS0_PADDING) copy() EDNS0 { - b := make([]byte, len(e.Padding)) - copy(b, e.Padding) - return &EDNS0_PADDING{b} -} +func (e *EDNS0_PADDING) copy() EDNS0 { return &EDNS0_PADDING{cloneSlice(e.Padding)} } // Extended DNS Error Codes (RFC 8914). 
const ( @@ -821,7 +813,7 @@ func (e *EDNS0_EDE) String() string { func (e *EDNS0_EDE) pack() ([]byte, error) { b := make([]byte, 2+len(e.ExtraText)) binary.BigEndian.PutUint16(b[0:], e.InfoCode) - copy(b[2:], []byte(e.ExtraText)) + copy(b[2:], e.ExtraText) return b, nil } diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go index 57410acd..505ae430 100644 --- a/vendor/github.com/miekg/dns/fuzz.go +++ b/vendor/github.com/miekg/dns/fuzz.go @@ -1,3 +1,4 @@ +//go:build fuzz // +build fuzz package dns diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go index f9faacfe..cd498d2e 100644 --- a/vendor/github.com/miekg/dns/labels.go +++ b/vendor/github.com/miekg/dns/labels.go @@ -122,7 +122,7 @@ func Split(s string) []int { } // NextLabel returns the index of the start of the next label in the -// string s starting at offset. +// string s starting at offset. A negative offset will cause a panic. // The bool end is true when the end of the string has been reached. // Also see PrevLabel. 
func NextLabel(s string, offset int) (i int, end bool) { diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go index b9201417..6ed50f86 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_reuseport.go @@ -1,4 +1,5 @@ -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package dns diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_reuseport.go index fad195cf..89bac903 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_reuseport.go @@ -1,4 +1,4 @@ -// +build go1.11 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build aix darwin dragonfly freebsd linux netbsd openbsd package dns diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 89ebb64a..d5049a4f 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -252,7 +252,7 @@ loop: } // check for \DDD - if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { + if isDDD(bs[i+1:]) { bs[i] = dddToByte(bs[i+1:]) copy(bs[i+1:ls-3], bs[i+4:]) ls -= 3 @@ -448,7 +448,7 @@ Loop: return string(s), off1, nil } -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { +func packTxt(txt []string, msg []byte, offset int) (int, error) { if len(txt) == 0 { if offset >= len(msg) { return offset, ErrBuf @@ -458,10 +458,7 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { } var err error for _, s := range txt { - if len(s) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(s, msg, offset, tmp) + offset, err = packTxtString(s, msg, offset) if err != nil { 
return offset, err } @@ -469,32 +466,30 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { return offset, nil } -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { +func packTxtString(s string, msg []byte, offset int) (int, error) { lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { + if offset >= len(msg) || len(s) > 256*4+1 /* If all \DDD */ { return offset, ErrBuf } offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { + for i := 0; i < len(s); i++ { if len(msg) <= offset { return offset, ErrBuf } - if bs[i] == '\\' { + if s[i] == '\\' { i++ - if i == len(bs) { + if i == len(s) { break } // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) + if isDDD(s[i:]) { + msg[offset] = dddToByte(s[i:]) i += 2 } else { - msg[offset] = bs[i] + msg[offset] = s[i] } } else { - msg[offset] = bs[i] + msg[offset] = s[i] } offset++ } @@ -522,7 +517,7 @@ func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) break } // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + if isDDD(bs[i:]) { msg[offset] = dddToByte(bs[i:]) i += 2 } else { @@ -551,12 +546,11 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { // Helpers for dealing with escaped bytes func isDigit(b byte) bool { return b >= '0' && b <= '9' } -func dddToByte(s []byte) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +func isDDD[T ~[]byte | ~string](s T) bool { + return len(s) >= 3 && isDigit(s[0]) && isDigit(s[1]) && isDigit(s[2]) } -func dddStringToByte(s string) byte { +func dddToByte[T ~[]byte | ~string](s T) byte { _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } @@ -680,9 +674,9 
@@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) // Convert a MsgHdr to a string, with dig-like headers: // -//;; opcode: QUERY, status: NOERROR, id: 48404 +// ;; opcode: QUERY, status: NOERROR, id: 48404 // -//;; flags: qr aa rd ra; +// ;; flags: qr aa rd ra; func (h *MsgHdr) String() string { if h == nil { return " MsgHdr" @@ -866,7 +860,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { // The header counts might have been wrong so we need to update it dh.Nscount = uint16(len(dns.Ns)) if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + dns.Extra, _, err = unpackRRslice(int(dh.Arcount), msg, off) } // The header counts might have been wrong so we need to update it dh.Arcount = uint16(len(dns.Extra)) @@ -876,11 +870,11 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { dns.Rcode |= opt.ExtendedRcode() } - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? 
+ // if off != len(msg) { + // // println("dns: extra bytes in dns packet", off, "<", len(msg)) + // } return err } @@ -1024,7 +1018,7 @@ func escapedNameLen(s string) int { continue } - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + if isDDD(s[i+1:]) { nameLen -= 3 i += 3 } else { @@ -1065,8 +1059,8 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg { r1.Compress = dns.Compress if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + // TODO(miek): Question is an immutable value, ok to do a shallow-copy + r1.Question = cloneSlice(dns.Question) } rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index ea2035cd..8582fc0a 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -299,8 +299,7 @@ func unpackString(msg []byte, off int) (string, int, error) { } func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) + off, err := packTxtString(s, msg, off) if err != nil { return len(msg), err } @@ -402,8 +401,7 @@ func unpackStringTxt(msg []byte, off int) ([]string, int, error) { } func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. - off, err := packTxt(s, msg, off, txtTmp) + off, err := packTxt(s, msg, off) if err != nil { return len(msg), err } @@ -625,7 +623,7 @@ func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) { } func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) { - pairs = append([]SVCBKeyValue(nil), pairs...) 
+ pairs = cloneSlice(pairs) sort.Slice(pairs, func(i, j int) bool { return pairs[i].Key() < pairs[j].Key() }) @@ -810,3 +808,37 @@ func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) { Network: ipnet, }, off, nil } + +func unpackIPSECGateway(msg []byte, off int, gatewayType uint8) (net.IP, string, int, error) { + var retAddr net.IP + var retString string + var err error + + switch gatewayType { + case IPSECGatewayNone: // do nothing + case IPSECGatewayIPv4: + retAddr, off, err = unpackDataA(msg, off) + case IPSECGatewayIPv6: + retAddr, off, err = unpackDataAAAA(msg, off) + case IPSECGatewayHost: + retString, off, err = UnpackDomainName(msg, off) + } + + return retAddr, retString, off, err +} + +func packIPSECGateway(gatewayAddr net.IP, gatewayString string, msg []byte, off int, gatewayType uint8, compression compressionMap, compress bool) (int, error) { + var err error + + switch gatewayType { + case IPSECGatewayNone: // do nothing + case IPSECGatewayIPv4: + off, err = packDataA(gatewayAddr, msg, off) + case IPSECGatewayIPv6: + off, err = packDataAAAA(gatewayAddr, msg, off) + case IPSECGatewayHost: + off, err = packDomainName(gatewayString, msg, off, compression, compress) + } + + return off, err +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index e398484d..2d44a398 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -3,6 +3,7 @@ package dns import ( "bytes" "encoding/base64" + "errors" "net" "strconv" "strings" @@ -1216,6 +1217,117 @@ func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") } func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") } +func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + num, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return 
&ParseError{"", "bad IPSECKEY value", l} + } + rr.Precedence = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad IPSECKEY value", l} + } + rr.GatewayType = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad IPSECKEY value", l} + } + rr.Algorithm = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err { + return &ParseError{"", "bad IPSECKEY gateway", l} + } + + rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType) + if err != nil { + return &ParseError{"", "IPSECKEY " + err.Error(), l} + } + + c.Next() // zBlank + + s, pErr := endingToString(c, "bad IPSECKEY PublicKey") + if pErr != nil { + return pErr + } + rr.PublicKey = s + return slurpRemainder(c) +} + +func (rr *AMTRELAY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + num, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad AMTRELAY value", l} + } + rr.Precedence = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err || !(l.token == "0" || l.token == "1") { + return &ParseError{"", "bad discovery value", l} + } + if l.token == "1" { + rr.GatewayType = 0x80 + } + + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{"", "bad AMTRELAY value", l} + } + rr.GatewayType |= uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err { + return &ParseError{"", "bad AMTRELAY gateway", l} + } + + rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType&0x7f) + if err != nil { + return &ParseError{"", "AMTRELAY " + err.Error(), l} + } + + return slurpRemainder(c) +} + +// same constants and parsing between IPSECKEY and AMTRELAY +func parseAddrHostUnion(token, o string, gatewayType uint8) (addr 
net.IP, host string, err error) { + switch gatewayType { + case IPSECGatewayNone: + if token != "." { + return addr, host, errors.New("gateway type none with gateway set") + } + case IPSECGatewayIPv4, IPSECGatewayIPv6: + addr = net.ParseIP(token) + if addr == nil { + return addr, host, errors.New("gateway IP invalid") + } + if (addr.To4() == nil) == (gatewayType == IPSECGatewayIPv4) { + return addr, host, errors.New("gateway IP family mismatch") + } + case IPSECGatewayHost: + var ok bool + host, ok = toAbsoluteName(token, o) + if !ok { + return addr, host, errors.New("invalid gateway host") + } + } + + return addr, host, nil +} + func (rr *RKEY) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 4e5a9aa8..64e38854 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -18,7 +18,7 @@ import ( const maxTCPQueries = 128 // aLongTimeAgo is a non-zero time, far in the past, used for -// immediate cancelation of network operations. +// immediate cancellation of network operations. var aLongTimeAgo = time.Unix(1, 0) // Handler is implemented by any value that implements ServeDNS. @@ -224,7 +224,7 @@ type Server struct { // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). MaxTCPQueries int // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. - // It is only supported on go1.11+ and when using ListenAndServe. + // It is only supported on certain GOOSes and when using ListenAndServe. ReusePort bool // AcceptMsgFunc will check the incoming message and will reject it early in the process. // By default DefaultMsgAcceptFunc will be used. 
diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index ea58710d..6d496d74 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -289,7 +289,7 @@ func (s *SVCBMandatory) String() string { } func (s *SVCBMandatory) pack() ([]byte, error) { - codes := append([]SVCBKey(nil), s.Code...) + codes := cloneSlice(s.Code) sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] }) @@ -328,9 +328,7 @@ func (s *SVCBMandatory) len() int { } func (s *SVCBMandatory) copy() SVCBKeyValue { - return &SVCBMandatory{ - append([]SVCBKey(nil), s.Code...), - } + return &SVCBMandatory{cloneSlice(s.Code)} } // SVCBAlpn pair is used to list supported connection protocols. @@ -353,7 +351,7 @@ func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN } func (s *SVCBAlpn) String() string { // An ALPN value is a comma-separated list of values, each of which can be // an arbitrary binary value. In order to allow parsing, the comma and - // backslash characters are themselves excaped. + // backslash characters are themselves escaped. // // However, this escaping is done in addition to the normal escaping which // happens in zone files, meaning that these values must be @@ -481,9 +479,7 @@ func (s *SVCBAlpn) len() int { } func (s *SVCBAlpn) copy() SVCBKeyValue { - return &SVCBAlpn{ - append([]string(nil), s.Alpn...), - } + return &SVCBAlpn{cloneSlice(s.Alpn)} } // SVCBNoDefaultAlpn pair signifies no support for default connection protocols. @@ -563,15 +559,15 @@ func (s *SVCBPort) parse(b string) error { // to the hinted IP address may be terminated and a new connection may be opened. 
// Basic use pattern for creating an ipv4hint option: // -// h := new(dns.HTTPS) -// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} -// e := new(dns.SVCBIPv4Hint) -// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBIPv4Hint) +// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} // -// Or +// Or // -// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} -// h.Value = append(h.Value, e) +// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} +// h.Value = append(h.Value, e) type SVCBIPv4Hint struct { Hint []net.IP } @@ -595,6 +591,7 @@ func (s *SVCBIPv4Hint) unpack(b []byte) error { if len(b) == 0 || len(b)%4 != 0 { return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4") } + b = cloneSlice(b) x := make([]net.IP, 0, len(b)/4) for i := 0; i < len(b); i += 4 { x = append(x, net.IP(b[i:i+4])) @@ -635,12 +632,9 @@ func (s *SVCBIPv4Hint) parse(b string) error { func (s *SVCBIPv4Hint) copy() SVCBKeyValue { hint := make([]net.IP, len(s.Hint)) for i, ip := range s.Hint { - hint[i] = copyIP(ip) - } - - return &SVCBIPv4Hint{ - Hint: hint, + hint[i] = cloneSlice(ip) } + return &SVCBIPv4Hint{Hint: hint} } // SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx]. @@ -660,19 +654,18 @@ func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) } func (s *SVCBECHConfig) len() int { return len(s.ECH) } func (s *SVCBECHConfig) pack() ([]byte, error) { - return append([]byte(nil), s.ECH...), nil + return cloneSlice(s.ECH), nil } func (s *SVCBECHConfig) copy() SVCBKeyValue { - return &SVCBECHConfig{ - append([]byte(nil), s.ECH...), - } + return &SVCBECHConfig{cloneSlice(s.ECH)} } func (s *SVCBECHConfig) unpack(b []byte) error { - s.ECH = append([]byte(nil), b...) 
+ s.ECH = cloneSlice(b) return nil } + func (s *SVCBECHConfig) parse(b string) error { x, err := fromBase64([]byte(b)) if err != nil { @@ -715,6 +708,7 @@ func (s *SVCBIPv6Hint) unpack(b []byte) error { if len(b) == 0 || len(b)%16 != 0 { return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16") } + b = cloneSlice(b) x := make([]net.IP, 0, len(b)/16) for i := 0; i < len(b); i += 16 { ip := net.IP(b[i : i+16]) @@ -758,12 +752,9 @@ func (s *SVCBIPv6Hint) parse(b string) error { func (s *SVCBIPv6Hint) copy() SVCBKeyValue { hint := make([]net.IP, len(s.Hint)) for i, ip := range s.Hint { - hint[i] = copyIP(ip) - } - - return &SVCBIPv6Hint{ - Hint: hint, + hint[i] = cloneSlice(ip) } + return &SVCBIPv6Hint{Hint: hint} } // SVCBDoHPath pair is used to indicate the URI template that the @@ -831,11 +822,11 @@ type SVCBLocal struct { func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode } func (s *SVCBLocal) String() string { return svcbParamToStr(s.Data) } -func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil } +func (s *SVCBLocal) pack() ([]byte, error) { return cloneSlice(s.Data), nil } func (s *SVCBLocal) len() int { return len(s.Data) } func (s *SVCBLocal) unpack(b []byte) error { - s.Data = append([]byte(nil), b...) + s.Data = cloneSlice(b) return nil } @@ -849,9 +840,7 @@ func (s *SVCBLocal) parse(b string) error { } func (s *SVCBLocal) copy() SVCBKeyValue { - return &SVCBLocal{s.KeyCode, - append([]byte(nil), s.Data...), - } + return &SVCBLocal{s.KeyCode, cloneSlice(s.Data)} } func (rr *SVCB) String() string { @@ -867,8 +856,8 @@ func (rr *SVCB) String() string { // areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their // copies. arrA and arrB have equal lengths, otherwise zduplicate.go wouldn't call this function. func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool { - a = append([]SVCBKeyValue(nil), a...) - b = append([]SVCBKeyValue(nil), b...) 
+ a = cloneSlice(a) + b = cloneSlice(b) sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() }) sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() }) for i, e := range a { diff --git a/vendor/github.com/miekg/dns/tools.go b/vendor/github.com/miekg/dns/tools.go index d1118253..ccf8f6bf 100644 --- a/vendor/github.com/miekg/dns/tools.go +++ b/vendor/github.com/miekg/dns/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // We include our tool dependencies for `go generate` here to ensure they're diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index d9becb67..03afeccd 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -65,6 +65,7 @@ const ( TypeAPL uint16 = 42 TypeDS uint16 = 43 TypeSSHFP uint16 = 44 + TypeIPSECKEY uint16 = 45 TypeRRSIG uint16 = 46 TypeNSEC uint16 = 47 TypeDNSKEY uint16 = 48 @@ -98,6 +99,7 @@ const ( TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC uint16 = 258 + TypeAMTRELAY uint16 = 260 TypeTKEY uint16 = 249 TypeTSIG uint16 = 250 @@ -159,6 +161,22 @@ const ( ZoneMDHashAlgSHA512 = 2 ) +// Used in IPSEC https://datatracker.ietf.org/doc/html/rfc4025#section-2.3 +const ( + IPSECGatewayNone uint8 = iota + IPSECGatewayIPv4 + IPSECGatewayIPv6 + IPSECGatewayHost +) + +// Used in AMTRELAY https://datatracker.ietf.org/doc/html/rfc8777#section-4.2.3 +const ( + AMTRELAYNone = IPSECGatewayNone + AMTRELAYIPv4 = IPSECGatewayIPv4 + AMTRELAYIPv6 = IPSECGatewayIPv6 + AMTRELAYHost = IPSECGatewayHost +) + // Header is the wire format for the DNS packet header. type Header struct { Id uint16 @@ -180,7 +198,7 @@ const ( _CD = 1 << 4 // checking disabled ) -// Various constants used in the LOC RR. See RFC 1887. +// Various constants used in the LOC RR. See RFC 1876. const ( LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
@@ -613,8 +631,8 @@ func nextByte(s string, offset int) (byte, int) { return 0, 0 case 2, 3: // too short to be \ddd default: // maybe \ddd - if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { - return dddStringToByte(s[offset+1:]), 4 + if isDDD(s[offset+1:]) { + return dddToByte(s[offset+1:]), 4 } } // not \ddd, just an RFC 1035 "quoted" character @@ -774,7 +792,10 @@ type LOC struct { // cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent // format and returns a string in m (two decimals for the cm). -func cmToM(m, e uint8) string { +func cmToM(x uint8) string { + m := x & 0xf0 >> 4 + e := x & 0x0f + if e < 2 { if e == 1 { m *= 10 @@ -830,10 +851,9 @@ func (rr *LOC) String() string { s += fmt.Sprintf("%.0fm ", alt) } - s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " - s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " - s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" - + s += cmToM(rr.Size) + "m " + s += cmToM(rr.HorizPre) + "m " + s += cmToM(rr.VertPre) + "m" return s } @@ -994,6 +1014,69 @@ func (rr *DNSKEY) String() string { " " + rr.PublicKey } +// IPSECKEY RR. See RFC 4025. +type IPSECKEY struct { + Hdr RR_Header + Precedence uint8 + GatewayType uint8 + Algorithm uint8 + GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost + GatewayHost string `dns:"ipsechost"` + PublicKey string `dns:"base64"` +} + +func (rr *IPSECKEY) String() string { + var gateway string + switch rr.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + gateway = rr.GatewayAddr.String() + case IPSECGatewayHost: + gateway = rr.GatewayHost + case IPSECGatewayNone: + fallthrough + default: + gateway = "." + } + + return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + " " + strconv.Itoa(int(rr.GatewayType)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + gateway + + " " + rr.PublicKey +} + +// AMTRELAY RR. See RFC 8777. 
+type AMTRELAY struct { + Hdr RR_Header + Precedence uint8 + GatewayType uint8 // discovery is packed in here at bit 0x80 + GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost + GatewayHost string `dns:"amtrelayhost"` +} + +func (rr *AMTRELAY) String() string { + var gateway string + switch rr.GatewayType & 0x7f { + case AMTRELAYIPv4, AMTRELAYIPv6: + gateway = rr.GatewayAddr.String() + case AMTRELAYHost: + gateway = rr.GatewayHost + case AMTRELAYNone: + fallthrough + default: + gateway = "." + } + boolS := "0" + if rr.GatewayType&0x80 == 0x80 { + boolS = "1" + } + + return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + " " + boolS + + " " + strconv.Itoa(int(rr.GatewayType&0x7f)) + + " " + gateway +} + // RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. type RKEY struct { Hdr RR_Header @@ -1450,7 +1533,7 @@ func (a *APLPrefix) str() string { // equals reports whether two APL prefixes are identical. func (a *APLPrefix) equals(b *APLPrefix) bool { return a.Negation == b.Negation && - bytes.Equal(a.Network.IP, b.Network.IP) && + a.Network.IP.Equal(b.Network.IP) && bytes.Equal(a.Network.Mask, b.Network.Mask) } @@ -1518,21 +1601,19 @@ func euiToString(eui uint64, bits int) (hex string) { return } -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p +// cloneSlice returns a shallow copy of s. +func cloneSlice[E any, S ~[]E](s S) S { + if s == nil { + return nil + } + return append(S(nil), s...) } // copyNet returns a copy of a subnet. 
func copyNet(n net.IPNet) net.IPNet { - m := make(net.IPMask, len(n.Mask)) - copy(m, n.Mask) - return net.IPNet{ - IP: copyIP(n.IP), - Mask: m, + IP: cloneSlice(n.IP), + Mask: cloneSlice(n.Mask), } } diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go index a4826ee2..c018ad43 100644 --- a/vendor/github.com/miekg/dns/udp.go +++ b/vendor/github.com/miekg/dns/udp.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package dns diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go index e7dd8ca3..a259b67e 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -1,5 +1,9 @@ +//go:build windows // +build windows +// TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and +// go.dev/issue/7174 are ever fixed. + package dns import "net" @@ -14,7 +18,6 @@ func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } // ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a // net.UDPAddr. -// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { n, raddr, err := conn.ReadFrom(b) if err != nil { @@ -24,12 +27,9 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { } // WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { return conn.WriteTo(b, session.raddr) } -// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods -// use the standard method in udp.go for these. 
func setUDPSocketOptions(*net.UDPConn) error { return nil } func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index b1a872bd..f03a169c 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 50} +var Version = v{1, 1, 53} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 1917e91c..0a831c88 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -44,7 +44,6 @@ func (t *Transfer) tsigProvider() TsigProvider { // dnscon := &dns.Conn{Conn:con} // transfer = &dns.Transfer{Conn: dnscon} // channel, err := transfer.In(message, master) -// func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { switch q.Question[0].Qtype { case TypeAXFR, TypeIXFR: diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 9eb1dac2..450bbbc2 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -43,6 +43,32 @@ func (r1 *AFSDB) isDuplicate(_r2 RR) bool { return true } +func (r1 *AMTRELAY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AMTRELAY) + if !ok { + return false + } + _ = r2 + if r1.Precedence != r2.Precedence { + return false + } + if r1.GatewayType != r2.GatewayType { + return false + } + switch r1.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + if !r1.GatewayAddr.Equal(r2.GatewayAddr) { + return false + } + case IPSECGatewayHost: + if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) { + return false + } + } + + return true +} + func (r1 *ANY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*ANY) if !ok { @@ -423,6 +449,38 @@ func (r1 *HTTPS) isDuplicate(_r2 RR) bool { return true } 
+func (r1 *IPSECKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*IPSECKEY) + if !ok { + return false + } + _ = r2 + if r1.Precedence != r2.Precedence { + return false + } + if r1.GatewayType != r2.GatewayType { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + switch r1.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + if !r1.GatewayAddr.Equal(r2.GatewayAddr) { + return false + } + case IPSECGatewayHost: + if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) { + return false + } + } + + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + func (r1 *KEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*KEY) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index fc0822f9..3ea0eb42 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -32,6 +32,22 @@ func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *AMTRELAY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Precedence, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.GatewayType, msg, off) + if err != nil { + return off, err + } + off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false) + if err != nil { + return off, err + } + return off, nil +} + func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { return off, nil } @@ -332,6 +348,30 @@ func (rr *HTTPS) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *IPSECKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Precedence, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.GatewayType, msg, off) + if err != nil { + return off, err + } + off, err 
= packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { @@ -1180,6 +1220,34 @@ func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *AMTRELAY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Precedence, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.GatewayType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + if off == len(msg) { + return off, nil + } + rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType) + if err != nil { + return off, err + } + return off, nil +} + func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart @@ -1636,6 +1704,48 @@ func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Precedence, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.GatewayType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + if off == len(msg) { + return off, nil + } + rr.GatewayAddr, rr.GatewayHost, off, err = 
unpackIPSECGateway(msg, off, rr.GatewayType) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 5d060cfe..1b6f4320 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -12,6 +12,7 @@ var TypeToRR = map[uint16]func() RR{ TypeA: func() RR { return new(A) }, TypeAAAA: func() RR { return new(AAAA) }, TypeAFSDB: func() RR { return new(AFSDB) }, + TypeAMTRELAY: func() RR { return new(AMTRELAY) }, TypeANY: func() RR { return new(ANY) }, TypeAPL: func() RR { return new(APL) }, TypeAVC: func() RR { return new(AVC) }, @@ -34,6 +35,7 @@ var TypeToRR = map[uint16]func() RR{ TypeHINFO: func() RR { return new(HINFO) }, TypeHIP: func() RR { return new(HIP) }, TypeHTTPS: func() RR { return new(HTTPS) }, + TypeIPSECKEY: func() RR { return new(IPSECKEY) }, TypeKEY: func() RR { return new(KEY) }, TypeKX: func() RR { return new(KX) }, TypeL32: func() RR { return new(L32) }, @@ -90,6 +92,7 @@ var TypeToString = map[uint16]string{ TypeA: "A", TypeAAAA: "AAAA", TypeAFSDB: "AFSDB", + TypeAMTRELAY: "AMTRELAY", TypeANY: "ANY", TypeAPL: "APL", TypeATMA: "ATMA", @@ -114,6 +117,7 @@ var TypeToString = map[uint16]string{ TypeHINFO: "HINFO", TypeHIP: "HIP", TypeHTTPS: "HTTPS", + TypeIPSECKEY: "IPSECKEY", TypeISDN: "ISDN", TypeIXFR: "IXFR", TypeKEY: "KEY", @@ -176,6 +180,7 @@ var TypeToString = map[uint16]string{ func (rr *A) Header() *RR_Header { return &rr.Hdr } func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *AMTRELAY) Header() *RR_Header { return &rr.Hdr } func (rr *ANY) Header() *RR_Header { return &rr.Hdr } 
func (rr *APL) Header() *RR_Header { return &rr.Hdr } func (rr *AVC) Header() *RR_Header { return &rr.Hdr } @@ -198,6 +203,7 @@ func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } func (rr *HIP) Header() *RR_Header { return &rr.Hdr } func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr } +func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr } func (rr *KEY) Header() *RR_Header { return &rr.Hdr } func (rr *KX) Header() *RR_Header { return &rr.Hdr } func (rr *L32) Header() *RR_Header { return &rr.Hdr } @@ -257,6 +263,7 @@ func (rr *A) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AAAA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) if len(rr.AAAA) != 0 { @@ -264,16 +271,34 @@ func (rr *AAAA) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AFSDB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Subtype l += domainNameLen(rr.Hostname, off+l, compression, false) return l } + +func (rr *AMTRELAY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Precedence + l++ // GatewayType + switch rr.GatewayType { + case AMTRELAYIPv4: + l += net.IPv4len + case AMTRELAYIPv6: + l += net.IPv6len + case AMTRELAYHost: + l += len(rr.GatewayHost) + 1 + } + return l +} + func (rr *ANY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) return l } + func (rr *APL) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Prefixes { @@ -281,6 +306,7 @@ func (rr *APL) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AVC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -288,6 +314,7 @@ func (rr *AVC) len(off int, compression map[string]struct{}) int { } return l } + 
func (rr *CAA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Flag @@ -295,6 +322,7 @@ func (rr *CAA) len(off int, compression map[string]struct{}) int { l += len(rr.Value) return l } + func (rr *CERT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Type @@ -303,21 +331,25 @@ func (rr *CERT) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) return l } + func (rr *CNAME) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Target, off+l, compression, true) return l } + func (rr *DHCID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.Digest)) return l } + func (rr *DNAME) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Target, off+l, compression, false) return l } + func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Flags @@ -326,6 +358,7 @@ func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *DS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -334,26 +367,31 @@ func (rr *DS) len(off int, compression map[string]struct{}) int { l += len(rr.Digest) / 2 return l } + func (rr *EID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Endpoint) / 2 return l } + func (rr *EUI48) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 6 // Address return l } + func (rr *EUI64) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 8 // Address return l } + func (rr *GID) len(off int, compression 
map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Gid return l } + func (rr *GPOS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Longitude) + 1 @@ -361,12 +399,14 @@ func (rr *GPOS) len(off int, compression map[string]struct{}) int { l += len(rr.Altitude) + 1 return l } + func (rr *HINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Cpu) + 1 l += len(rr.Os) + 1 return l } + func (rr *HIP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // HitLength @@ -379,12 +419,31 @@ func (rr *HIP) len(off int, compression map[string]struct{}) int { } return l } + +func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Precedence + l++ // GatewayType + l++ // Algorithm + switch rr.GatewayType { + case IPSECGatewayIPv4: + l += net.IPv4len + case IPSECGatewayIPv6: + l += net.IPv6len + case IPSECGatewayHost: + l += len(rr.GatewayHost) + 1 + } + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} + func (rr *KX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Exchanger, off+l, compression, false) return l } + func (rr *L32) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -393,12 +452,14 @@ func (rr *L32) len(off int, compression map[string]struct{}) int { } return l } + func (rr *L64) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // Locator64 return l } + func (rr *LOC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Version @@ -410,49 +471,58 @@ func (rr *LOC) len(off int, compression map[string]struct{}) int { l += 4 // Altitude return l } + func (rr *LP) len(off int, compression 
map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Fqdn, off+l, compression, false) return l } + func (rr *MB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mb, off+l, compression, true) return l } + func (rr *MD) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Md, off+l, compression, true) return l } + func (rr *MF) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mf, off+l, compression, true) return l } + func (rr *MG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mg, off+l, compression, true) return l } + func (rr *MINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Rmail, off+l, compression, true) l += domainNameLen(rr.Email, off+l, compression, true) return l } + func (rr *MR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mr, off+l, compression, true) return l } + func (rr *MX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Mx, off+l, compression, true) return l } + func (rr *NAPTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Order @@ -463,17 +533,20 @@ func (rr *NAPTR) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Replacement, off+l, compression, false) return l } + func (rr *NID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // NodeID return l } + func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Locator) / 2 return l } + func (rr *NINFO) len(off int, 
compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.ZSData { @@ -481,16 +554,19 @@ func (rr *NINFO) len(off int, compression map[string]struct{}) int { } return l } + func (rr *NS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ns, off+l, compression, true) return l } + func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ptr, off+l, compression, false) return l } + func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Hash @@ -500,21 +576,25 @@ func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { l += len(rr.Salt) / 2 return l } + func (rr *NULL) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Data) return l } + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *PTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ptr, off+l, compression, true) return l } + func (rr *PX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -522,11 +602,13 @@ func (rr *PX) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Mapx400, off+l, compression, false) return l } + func (rr *RFC3597) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Rdata) / 2 return l } + func (rr *RKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Flags @@ -535,12 +617,14 @@ func (rr *RKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *RP) len(off int, 
compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mbox, off+l, compression, false) l += domainNameLen(rr.Txt, off+l, compression, false) return l } + func (rr *RRSIG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // TypeCovered @@ -554,12 +638,14 @@ func (rr *RRSIG) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.Signature)) return l } + func (rr *RT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Host, off+l, compression, false) return l } + func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Usage @@ -568,6 +654,7 @@ func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { l += len(rr.Certificate) / 2 return l } + func (rr *SOA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ns, off+l, compression, true) @@ -579,6 +666,7 @@ func (rr *SOA) len(off int, compression map[string]struct{}) int { l += 4 // Minttl return l } + func (rr *SPF) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -586,6 +674,7 @@ func (rr *SPF) len(off int, compression map[string]struct{}) int { } return l } + func (rr *SRV) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -594,6 +683,7 @@ func (rr *SRV) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Target, off+l, compression, false) return l } + func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Algorithm @@ -601,6 +691,7 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l += len(rr.FingerPrint) / 2 return l } + func (rr *SVCB) len(off int, compression 
map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -610,6 +701,7 @@ func (rr *SVCB) len(off int, compression map[string]struct{}) int { } return l } + func (rr *TA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -618,12 +710,14 @@ func (rr *TA) len(off int, compression map[string]struct{}) int { l += len(rr.Digest) / 2 return l } + func (rr *TALINK) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.PreviousName, off+l, compression, false) l += domainNameLen(rr.NextName, off+l, compression, false) return l } + func (rr *TKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Algorithm, off+l, compression, false) @@ -637,6 +731,7 @@ func (rr *TKEY) len(off int, compression map[string]struct{}) int { l += len(rr.OtherData) / 2 return l } + func (rr *TLSA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Usage @@ -645,6 +740,7 @@ func (rr *TLSA) len(off int, compression map[string]struct{}) int { l += len(rr.Certificate) / 2 return l } + func (rr *TSIG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Algorithm, off+l, compression, false) @@ -658,6 +754,7 @@ func (rr *TSIG) len(off int, compression map[string]struct{}) int { l += len(rr.OtherData) / 2 return l } + func (rr *TXT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -665,16 +762,19 @@ func (rr *TXT) len(off int, compression map[string]struct{}) int { } return l } + func (rr *UID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Uid return l } + func (rr *UINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Uinfo) + 1 return l } + func (rr 
*URI) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -682,11 +782,13 @@ func (rr *URI) len(off int, compression map[string]struct{}) int { l += len(rr.Target) return l } + func (rr *X25) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.PSDNAddress) + 1 return l } + func (rr *ZONEMD) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Serial @@ -698,17 +800,31 @@ func (rr *ZONEMD) len(off int, compression map[string]struct{}) int { // copy() functions func (rr *A) copy() RR { - return &A{rr.Hdr, copyIP(rr.A)} + return &A{rr.Hdr, cloneSlice(rr.A)} } + func (rr *AAAA) copy() RR { - return &AAAA{rr.Hdr, copyIP(rr.AAAA)} + return &AAAA{rr.Hdr, cloneSlice(rr.AAAA)} } + func (rr *AFSDB) copy() RR { return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} } + +func (rr *AMTRELAY) copy() RR { + return &AMTRELAY{ + rr.Hdr, + rr.Precedence, + rr.GatewayType, + cloneSlice(rr.GatewayAddr), + rr.GatewayHost, + } +} + func (rr *ANY) copy() RR { return &ANY{rr.Hdr} } + func (rr *APL) copy() RR { Prefixes := make([]APLPrefix, len(rr.Prefixes)) for i, e := range rr.Prefixes { @@ -716,150 +832,270 @@ func (rr *APL) copy() RR { } return &APL{rr.Hdr, Prefixes} } + func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{rr.Hdr, Txt} + return &AVC{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *CAA) copy() RR { - return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} + return &CAA{ + rr.Hdr, + rr.Flag, + rr.Tag, + rr.Value, + } } + func (rr *CDNSKEY) copy() RR { return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)} } + func (rr *CDS) copy() RR { return &CDS{*rr.DS.copy().(*DS)} } + func (rr *CERT) copy() RR { - return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} + return &CERT{ + rr.Hdr, + rr.Type, + rr.KeyTag, + rr.Algorithm, + rr.Certificate, + } } + func (rr *CNAME) copy() RR { return &CNAME{rr.Hdr, 
rr.Target} } + func (rr *CSYNC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} + return &CSYNC{ + rr.Hdr, + rr.Serial, + rr.Flags, + cloneSlice(rr.TypeBitMap), + } } + func (rr *DHCID) copy() RR { return &DHCID{rr.Hdr, rr.Digest} } + func (rr *DLV) copy() RR { return &DLV{*rr.DS.copy().(*DS)} } + func (rr *DNAME) copy() RR { return &DNAME{rr.Hdr, rr.Target} } + func (rr *DNSKEY) copy() RR { - return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &DNSKEY{ + rr.Hdr, + rr.Flags, + rr.Protocol, + rr.Algorithm, + rr.PublicKey, + } } + func (rr *DS) copy() RR { - return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &DS{ + rr.Hdr, + rr.KeyTag, + rr.Algorithm, + rr.DigestType, + rr.Digest, + } } + func (rr *EID) copy() RR { return &EID{rr.Hdr, rr.Endpoint} } + func (rr *EUI48) copy() RR { return &EUI48{rr.Hdr, rr.Address} } + func (rr *EUI64) copy() RR { return &EUI64{rr.Hdr, rr.Address} } + func (rr *GID) copy() RR { return &GID{rr.Hdr, rr.Gid} } + func (rr *GPOS) copy() RR { - return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} + return &GPOS{ + rr.Hdr, + rr.Longitude, + rr.Latitude, + rr.Altitude, + } } + func (rr *HINFO) copy() RR { return &HINFO{rr.Hdr, rr.Cpu, rr.Os} } + func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} + return &HIP{ + rr.Hdr, + rr.HitLength, + rr.PublicKeyAlgorithm, + rr.PublicKeyLength, + rr.Hit, + rr.PublicKey, + cloneSlice(rr.RendezvousServers), + } } + func (rr *HTTPS) copy() RR { return &HTTPS{*rr.SVCB.copy().(*SVCB)} } + +func (rr *IPSECKEY) copy() RR { + return &IPSECKEY{ + rr.Hdr, + rr.Precedence, + rr.GatewayType, + rr.Algorithm, + cloneSlice(rr.GatewayAddr), + 
rr.GatewayHost, + rr.PublicKey, + } +} + func (rr *KEY) copy() RR { return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} } + func (rr *KX) copy() RR { return &KX{rr.Hdr, rr.Preference, rr.Exchanger} } + func (rr *L32) copy() RR { - return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} + return &L32{rr.Hdr, rr.Preference, cloneSlice(rr.Locator32)} } + func (rr *L64) copy() RR { return &L64{rr.Hdr, rr.Preference, rr.Locator64} } + func (rr *LOC) copy() RR { - return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} + return &LOC{ + rr.Hdr, + rr.Version, + rr.Size, + rr.HorizPre, + rr.VertPre, + rr.Latitude, + rr.Longitude, + rr.Altitude, + } } + func (rr *LP) copy() RR { return &LP{rr.Hdr, rr.Preference, rr.Fqdn} } + func (rr *MB) copy() RR { return &MB{rr.Hdr, rr.Mb} } + func (rr *MD) copy() RR { return &MD{rr.Hdr, rr.Md} } + func (rr *MF) copy() RR { return &MF{rr.Hdr, rr.Mf} } + func (rr *MG) copy() RR { return &MG{rr.Hdr, rr.Mg} } + func (rr *MINFO) copy() RR { return &MINFO{rr.Hdr, rr.Rmail, rr.Email} } + func (rr *MR) copy() RR { return &MR{rr.Hdr, rr.Mr} } + func (rr *MX) copy() RR { return &MX{rr.Hdr, rr.Preference, rr.Mx} } + func (rr *NAPTR) copy() RR { - return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} + return &NAPTR{ + rr.Hdr, + rr.Order, + rr.Preference, + rr.Flags, + rr.Service, + rr.Regexp, + rr.Replacement, + } } + func (rr *NID) copy() RR { return &NID{rr.Hdr, rr.Preference, rr.NodeID} } + func (rr *NIMLOC) copy() RR { return &NIMLOC{rr.Hdr, rr.Locator} } + func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{rr.Hdr, ZSData} + return &NINFO{rr.Hdr, cloneSlice(rr.ZSData)} } + func (rr *NS) copy() RR { return &NS{rr.Hdr, rr.Ns} } + func (rr *NSAPPTR) copy() RR { return &NSAPPTR{rr.Hdr, rr.Ptr} } + func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, 
rr.TypeBitMap) - return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} + return &NSEC{rr.Hdr, rr.NextDomain, cloneSlice(rr.TypeBitMap)} } + func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} + return &NSEC3{ + rr.Hdr, + rr.Hash, + rr.Flags, + rr.Iterations, + rr.SaltLength, + rr.Salt, + rr.HashLength, + rr.NextDomain, + cloneSlice(rr.TypeBitMap), + } } + func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} + return &NSEC3PARAM{ + rr.Hdr, + rr.Hash, + rr.Flags, + rr.Iterations, + rr.SaltLength, + rr.Salt, + } } + func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } + func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{rr.Hdr, rr.PublicKey} } + func (rr *OPT) copy() RR { Option := make([]EDNS0, len(rr.Option)) for i, e := range rr.Option { @@ -867,86 +1103,205 @@ func (rr *OPT) copy() RR { } return &OPT{rr.Hdr, Option} } + func (rr *PTR) copy() RR { return &PTR{rr.Hdr, rr.Ptr} } + func (rr *PX) copy() RR { - return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} + return &PX{ + rr.Hdr, + rr.Preference, + rr.Map822, + rr.Mapx400, + } } + func (rr *RFC3597) copy() RR { return &RFC3597{rr.Hdr, rr.Rdata} } + func (rr *RKEY) copy() RR { - return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &RKEY{ + rr.Hdr, + rr.Flags, + rr.Protocol, + rr.Algorithm, + rr.PublicKey, + } } + func (rr *RP) copy() RR { return &RP{rr.Hdr, rr.Mbox, rr.Txt} } + func (rr *RRSIG) copy() RR { - return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} + return &RRSIG{ + rr.Hdr, + rr.TypeCovered, + rr.Algorithm, + rr.Labels, + rr.OrigTtl, + rr.Expiration, + rr.Inception, + rr.KeyTag, + rr.SignerName, + rr.Signature, + } } + func (rr 
*RT) copy() RR { return &RT{rr.Hdr, rr.Preference, rr.Host} } + func (rr *SIG) copy() RR { return &SIG{*rr.RRSIG.copy().(*RRSIG)} } + func (rr *SMIMEA) copy() RR { - return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &SMIMEA{ + rr.Hdr, + rr.Usage, + rr.Selector, + rr.MatchingType, + rr.Certificate, + } } + func (rr *SOA) copy() RR { - return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} + return &SOA{ + rr.Hdr, + rr.Ns, + rr.Mbox, + rr.Serial, + rr.Refresh, + rr.Retry, + rr.Expire, + rr.Minttl, + } } + func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{rr.Hdr, Txt} + return &SPF{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *SRV) copy() RR { - return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} + return &SRV{ + rr.Hdr, + rr.Priority, + rr.Weight, + rr.Port, + rr.Target, + } } + func (rr *SSHFP) copy() RR { - return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} + return &SSHFP{ + rr.Hdr, + rr.Algorithm, + rr.Type, + rr.FingerPrint, + } } + func (rr *SVCB) copy() RR { Value := make([]SVCBKeyValue, len(rr.Value)) for i, e := range rr.Value { Value[i] = e.copy() } - return &SVCB{rr.Hdr, rr.Priority, rr.Target, Value} + return &SVCB{ + rr.Hdr, + rr.Priority, + rr.Target, + Value, + } } + func (rr *TA) copy() RR { - return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &TA{ + rr.Hdr, + rr.KeyTag, + rr.Algorithm, + rr.DigestType, + rr.Digest, + } } + func (rr *TALINK) copy() RR { return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} } + func (rr *TKEY) copy() RR { - return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} + return &TKEY{ + rr.Hdr, + rr.Algorithm, + rr.Inception, + rr.Expiration, + rr.Mode, + rr.Error, + rr.KeySize, + rr.Key, + rr.OtherLen, + rr.OtherData, + } } + func (rr *TLSA) copy() RR { - return &TLSA{rr.Hdr, rr.Usage, 
rr.Selector, rr.MatchingType, rr.Certificate} + return &TLSA{ + rr.Hdr, + rr.Usage, + rr.Selector, + rr.MatchingType, + rr.Certificate, + } } + func (rr *TSIG) copy() RR { - return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} + return &TSIG{ + rr.Hdr, + rr.Algorithm, + rr.TimeSigned, + rr.Fudge, + rr.MACSize, + rr.MAC, + rr.OrigId, + rr.Error, + rr.OtherLen, + rr.OtherData, + } } + func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{rr.Hdr, Txt} + return &TXT{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *UID) copy() RR { return &UID{rr.Hdr, rr.Uid} } + func (rr *UINFO) copy() RR { return &UINFO{rr.Hdr, rr.Uinfo} } + func (rr *URI) copy() RR { - return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} + return &URI{ + rr.Hdr, + rr.Priority, + rr.Weight, + rr.Target, + } } + func (rr *X25) copy() RR { return &X25{rr.Hdr, rr.PSDNAddress} } + func (rr *ZONEMD) copy() RR { - return &ZONEMD{rr.Hdr, rr.Serial, rr.Scheme, rr.Hash, rr.Digest} + return &ZONEMD{ + rr.Hdr, + rr.Serial, + rr.Scheme, + rr.Hash, + rr.Digest, + } } diff --git a/vendor/github.com/multiformats/go-multiaddr/protocols.go b/vendor/github.com/multiformats/go-multiaddr/protocols.go index db0b1218..b01e6cb8 100644 --- a/vendor/github.com/multiformats/go-multiaddr/protocols.go +++ b/vendor/github.com/multiformats/go-multiaddr/protocols.go @@ -31,14 +31,15 @@ const ( P_ONION3 = 445 P_GARLIC64 = 446 P_GARLIC32 = 447 - P_P2P_WEBRTC_DIRECT = 276 + P_P2P_WEBRTC_DIRECT = 276 // Deprecated. 
use webrtc-direct instead P_TLS = 448 P_SNI = 449 P_NOISE = 454 P_WS = 477 P_WSS = 478 // deprecated alias for /tls/ws P_PLAINTEXTV2 = 7367777 - P_WEBRTC = 280 + P_WEBRTC_DIRECT = 280 + P_WEBRTC = 281 ) var ( @@ -262,6 +263,11 @@ var ( Code: P_WSS, VCode: CodeToVarint(P_WSS), } + protoWebRTCDirect = Protocol{ + Name: "webrtc-direct", + Code: P_WEBRTC_DIRECT, + VCode: CodeToVarint(P_WEBRTC_DIRECT), + } protoWebRTC = Protocol{ Name: "webrtc", Code: P_WEBRTC, @@ -305,6 +311,7 @@ func init() { protoWS, protoWSS, protoPlaintextV2, + protoWebRTCDirect, protoWebRTC, } { if err := AddProtocol(p); err != nil { diff --git a/vendor/github.com/multiformats/go-multiaddr/version.json b/vendor/github.com/multiformats/go-multiaddr/version.json index 0ad79e3b..960b84e5 100644 --- a/vendor/github.com/multiformats/go-multiaddr/version.json +++ b/vendor/github.com/multiformats/go-multiaddr/version.json @@ -1,3 +1,3 @@ { - "version": "v0.8.0" + "version": "v0.9.0" } diff --git a/vendor/github.com/multiformats/go-multibase/version.json b/vendor/github.com/multiformats/go-multibase/version.json index 5e94b0fa..1437d5b7 100644 --- a/vendor/github.com/multiformats/go-multibase/version.json +++ b/vendor/github.com/multiformats/go-multibase/version.json @@ -1,3 +1,3 @@ { - "version": "v0.1.1" + "version": "v0.2.0" } diff --git a/vendor/github.com/multiformats/go-multicodec/code.go b/vendor/github.com/multiformats/go-multicodec/code.go index ed6b1dd8..6fc8ecc1 100644 --- a/vendor/github.com/multiformats/go-multicodec/code.go +++ b/vendor/github.com/multiformats/go-multicodec/code.go @@ -9,7 +9,7 @@ import ( //go:generate go run gen.go //go:generate gofmt -w code_table.go -//go:generate go run golang.org/x/tools/cmd/stringer@v0.1.10 -type=Code -linecomment +//go:generate go run golang.org/x/tools/cmd/stringer@v0.5.0 -type=Code -linecomment // Code describes an integer reserved in the multicodec table, defined at // github.com/multiformats/multicodec. 
diff --git a/vendor/github.com/multiformats/go-multicodec/code_string.go b/vendor/github.com/multiformats/go-multicodec/code_string.go index be6ecf6e..83663efe 100644 --- a/vendor/github.com/multiformats/go-multicodec/code_string.go +++ b/vendor/github.com/multiformats/go-multicodec/code_string.go @@ -117,6 +117,8 @@ func _() { _ = x[DagJson-297] _ = x[Udt-301] _ = x[Utp-302] + _ = x[Crc32-306] + _ = x[Crc64Ecma-356] _ = x[Unix-400] _ = x[Thread-406] _ = x[P2p-421] @@ -129,6 +131,7 @@ func _() { _ = x[Sni-449] _ = x[Noise-454] _ = x[Quic-460] + _ = x[QuicV1-461] _ = x[Webtransport-465] _ = x[Certhash-466] _ = x[Ws-477] @@ -139,6 +142,7 @@ func _() { _ = x[Json-512] _ = x[Messagepack-513] _ = x[Car-514] + _ = x[IpnsRecord-768] _ = x[Libp2pPeerRecord-769] _ = x[Libp2pRelayRsvp-770] _ = x[Memorytransport-777] @@ -146,6 +150,7 @@ func _() { _ = x[CarMultihashIndexSorted-1025] _ = x[TransportBitswap-2304] _ = x[TransportGraphsyncFilecoinv1-2320] + _ = x[Multidid-3357] _ = x[Sha2_256Trunc254Padded-4114] _ = x[Sha2_224-4115] _ = x[Sha2_512_224-4116] @@ -490,6 +495,10 @@ func _() { _ = x[Skein1024_1008-46046] _ = x[Skein1024_1016-46047] _ = x[Skein1024_1024-46048] + _ = x[Xxh32-46049] + _ = x[Xxh64-46050] + _ = x[Xxh3_64-46051] + _ = x[Xxh3_128-46052] _ = x[PoseidonBls12_381A2Fc1-46081] _ = x[PoseidonBls12_381A2Fc1Sc-46082] _ = x[Urdca2015Canon-46083] @@ -504,6 +513,7 @@ func _() { _ = x[Bls12381G2Sig-53483] _ = x[Eddsa-53485] _ = x[Eip191-53649] + _ = x[Jwk_jcsPub-60241] _ = x[FilCommitmentUnsealed-61697] _ = x[FilCommitmentSealed-61698] _ = x[Plaintextv2-7367777] @@ -523,7 +533,7 @@ func _() { _ = x[Rs256-13636101] } -const _Code_name = 
"identitycidv1cidv2cidv3ip4tcpsha1sha2-256sha2-512sha3-512sha3-384sha3-256sha3-224shake-128shake-256keccak-224keccak-256keccak-384keccak-512blake3sha2-384dccpmurmur3-x64-64murmur3-32ip6ip6zoneipcidrpathmulticodecmultihashmultiaddrmultibasednsdns4dns6dnsaddrprotobufcborrawdbl-sha2-256rlpbencodedag-pbdag-cborlibp2p-keygit-rawtorrent-infotorrent-fileleofcoin-blockleofcoin-txleofcoin-prsctpdag-josedag-coseeth-blocketh-block-listeth-tx-trieeth-txeth-tx-receipt-trieeth-tx-receipteth-state-trieeth-account-snapshoteth-storage-trieeth-receipt-log-trieeth-reciept-logaes-128aes-192aes-256chacha-128chacha-256bitcoin-blockbitcoin-txbitcoin-witness-commitmentzcash-blockzcash-txcaip-50streamidstellar-blockstellar-txmd4md5decred-blockdecred-txipldipfsswarmipnszeronetsecp256k1-pubdnslinkbls12_381-g1-pubbls12_381-g2-pubx25519-pubed25519-pubbls12_381-g1g2-pubdash-blockdash-txswarm-manifestswarm-feedbeesonudpp2p-webrtc-starp2p-webrtc-directp2p-stardustwebrtcp2p-circuitdag-jsonudtutpunixthreadp2phttpsoniononion3garlic64garlic32tlssninoisequicwebtransportcerthashwswssp2p-websocket-starhttpswhid-1-snpjsonmessagepackcarlibp2p-peer-recordlibp2p-relay-rsvpmemorytransportcar-index-sortedcar-multihash-index-sortedtransport-bitswaptransport-graphsync-filecoinv1sha2-256-trunc254-paddedsha2-224sha2-512-224sha2-512-256murmur3-x64-128ripemd-128ripemd-160ripemd-256ripemd-320x11p256-pubp384-pubp521-pubed448-pubx448-pubrsa-pubsm2-pubed25519-privsecp256k1-privx25519-privrsa-privkangarootwelvesilverpinesm3-256blake2b-8blake2b-16blake2b-24blake2b-32blake2b-40blake2b-48blake2b-56blake2b-64blake2b-72blake2b-80blake2b-88blake2b-96blake2b-104blake2b-112blake2b-120blake2b-128blake2b-136blake2b-144blake2b-152blake2b-160blake2b-168blake2b-176blake2b-184blake2b-192blake2b-200blake2b-208blake2b-216blake2b-224blake2b-232blake2b-240blake2b-248blake2b-256blake2b-264blake2b-272blake2b-280blake2b-288blake2b-296blake2b-304blake2b-312blake2b-320blake2b-328blake2b-336blake2b-344blake2b-352blake2b-360blake2b-368blake2b-37
6blake2b-384blake2b-392blake2b-400blake2b-408blake2b-416blake2b-424blake2b-432blake2b-440blake2b-448blake2b-456blake2b-464blake2b-472blake2b-480blake2b-488blake2b-496blake2b-504blake2b-512blake2s-8blake2s-16blake2s-24blake2s-32blake2s-40blake2s-48blake2s-56blake2s-64blake2s-72blake2s-80blake2s-88blake2s-96blake2s-104blake2s-112blake2s-120blake2s-128blake2s-136blake2s-144blake2s-152blake2s-160blake2s-168blake2s-176blake2s-184blake2s-192blake2s-200blake2s-208blake2s-216blake2s-224blake2s-232blake2s-240blake2s-248blake2s-256skein256-8skein256-16skein256-24skein256-32skein256-40skein256-48skein256-56skein256-64skein256-72skein256-80skein256-88skein256-96skein256-104skein256-112skein256-120skein256-128skein256-136skein256-144skein256-152skein256-160skein256-168skein256-176skein256-184skein256-192skein256-200skein256-208skein256-216skein256-224skein256-232skein256-240skein256-248skein256-256skein512-8skein512-16skein512-24skein512-32skein512-40skein512-48skein512-56skein512-64skein512-72skein512-80skein512-88skein512-96skein512-104skein512-112skein512-120skein512-128skein512-136skein512-144skein512-152skein512-160skein512-168skein512-176skein512-184skein512-192skein512-200skein512-208skein512-216skein512-224skein512-232skein512-240skein512-248skein512-256skein512-264skein512-272skein512-280skein512-288skein512-296skein512-304skein512-312skein512-320skein512-328skein512-336skein512-344skein512-352skein512-360skein512-368skein512-376skein512-384skein512-392skein512-400skein512-408skein512-416skein512-424skein512-432skein512-440skein512-448skein512-456skein512-464skein512-472skein512-480skein512-488skein512-496skein512-504skein512-512skein1024-8skein1024-16skein1024-24skein1024-32skein1024-40skein1024-48skein1024-56skein1024-64skein1024-72skein1024-80skein1024-88skein1024-96skein1024-104skein1024-112skein1024-120skein1024-128skein1024-136skein1024-144skein1024-152skein1024-160skein1024-168skein1024-176skein1024-184skein1024-192skein1024-200skein1024-208skein1024-216skein1024
-224skein1024-232skein1024-240skein1024-248skein1024-256skein1024-264skein1024-272skein1024-280skein1024-288skein1024-296skein1024-304skein1024-312skein1024-320skein1024-328skein1024-336skein1024-344skein1024-352skein1024-360skein1024-368skein1024-376skein1024-384skein1024-392skein1024-400skein1024-408skein1024-416skein1024-424skein1024-432skein1024-440skein1024-448skein1024-456skein1024-464skein1024-472skein1024-480skein1024-488skein1024-496skein1024-504skein1024-512skein1024-520skein1024-528skein1024-536skein1024-544skein1024-552skein1024-560skein1024-568skein1024-576skein1024-584skein1024-592skein1024-600skein1024-608skein1024-616skein1024-624skein1024-632skein1024-640skein1024-648skein1024-656skein1024-664skein1024-672skein1024-680skein1024-688skein1024-696skein1024-704skein1024-712skein1024-720skein1024-728skein1024-736skein1024-744skein1024-752skein1024-760skein1024-768skein1024-776skein1024-784skein1024-792skein1024-800skein1024-808skein1024-816skein1024-824skein1024-832skein1024-840skein1024-848skein1024-856skein1024-864skein1024-872skein1024-880skein1024-888skein1024-896skein1024-904skein1024-912skein1024-920skein1024-928skein1024-936skein1024-944skein1024-952skein1024-960skein1024-968skein1024-976skein1024-984skein1024-992skein1024-1000skein1024-1008skein1024-1016skein1024-1024poseidon-bls12_381-a2-fc1poseidon-bls12_381-a2-fc1-scurdca-2015-canonsszssz-sha2-256-bmtjson-jcsiscczeroxcert-imprint-256varsiges256kbls-12381-g1-sigbls-12381-g2-sigeddsaeip-191fil-commitment-unsealedfil-commitment-sealedplaintextv2holochain-adr-v0holochain-adr-v1holochain-key-v0holochain-key-v1holochain-sig-v0holochain-sig-v1skynet-nsarweave-nssubspace-nskumandra-nses256es284es512rs256" +const _Code_name = 
"identitycidv1cidv2cidv3ip4tcpsha1sha2-256sha2-512sha3-512sha3-384sha3-256sha3-224shake-128shake-256keccak-224keccak-256keccak-384keccak-512blake3sha2-384dccpmurmur3-x64-64murmur3-32ip6ip6zoneipcidrpathmulticodecmultihashmultiaddrmultibasednsdns4dns6dnsaddrprotobufcborrawdbl-sha2-256rlpbencodedag-pbdag-cborlibp2p-keygit-rawtorrent-infotorrent-fileleofcoin-blockleofcoin-txleofcoin-prsctpdag-josedag-coseeth-blocketh-block-listeth-tx-trieeth-txeth-tx-receipt-trieeth-tx-receipteth-state-trieeth-account-snapshoteth-storage-trieeth-receipt-log-trieeth-reciept-logaes-128aes-192aes-256chacha-128chacha-256bitcoin-blockbitcoin-txbitcoin-witness-commitmentzcash-blockzcash-txcaip-50streamidstellar-blockstellar-txmd4md5decred-blockdecred-txipldipfsswarmipnszeronetsecp256k1-pubdnslinkbls12_381-g1-pubbls12_381-g2-pubx25519-pubed25519-pubbls12_381-g1g2-pubdash-blockdash-txswarm-manifestswarm-feedbeesonudpp2p-webrtc-starp2p-webrtc-directp2p-stardustwebrtcp2p-circuitdag-jsonudtutpcrc32crc64-ecmaunixthreadp2phttpsoniononion3garlic64garlic32tlssninoisequicquic-v1webtransportcerthashwswssp2p-websocket-starhttpswhid-1-snpjsonmessagepackcaripns-recordlibp2p-peer-recordlibp2p-relay-rsvpmemorytransportcar-index-sortedcar-multihash-index-sortedtransport-bitswaptransport-graphsync-filecoinv1multididsha2-256-trunc254-paddedsha2-224sha2-512-224sha2-512-256murmur3-x64-128ripemd-128ripemd-160ripemd-256ripemd-320x11p256-pubp384-pubp521-pubed448-pubx448-pubrsa-pubsm2-pubed25519-privsecp256k1-privx25519-privrsa-privkangarootwelvesilverpinesm3-256blake2b-8blake2b-16blake2b-24blake2b-32blake2b-40blake2b-48blake2b-56blake2b-64blake2b-72blake2b-80blake2b-88blake2b-96blake2b-104blake2b-112blake2b-120blake2b-128blake2b-136blake2b-144blake2b-152blake2b-160blake2b-168blake2b-176blake2b-184blake2b-192blake2b-200blake2b-208blake2b-216blake2b-224blake2b-232blake2b-240blake2b-248blake2b-256blake2b-264blake2b-272blake2b-280blake2b-288blake2b-296blake2b-304blake2b-312blake2b-320blake2b-328blake2b-336blake2b-344bl
ake2b-352blake2b-360blake2b-368blake2b-376blake2b-384blake2b-392blake2b-400blake2b-408blake2b-416blake2b-424blake2b-432blake2b-440blake2b-448blake2b-456blake2b-464blake2b-472blake2b-480blake2b-488blake2b-496blake2b-504blake2b-512blake2s-8blake2s-16blake2s-24blake2s-32blake2s-40blake2s-48blake2s-56blake2s-64blake2s-72blake2s-80blake2s-88blake2s-96blake2s-104blake2s-112blake2s-120blake2s-128blake2s-136blake2s-144blake2s-152blake2s-160blake2s-168blake2s-176blake2s-184blake2s-192blake2s-200blake2s-208blake2s-216blake2s-224blake2s-232blake2s-240blake2s-248blake2s-256skein256-8skein256-16skein256-24skein256-32skein256-40skein256-48skein256-56skein256-64skein256-72skein256-80skein256-88skein256-96skein256-104skein256-112skein256-120skein256-128skein256-136skein256-144skein256-152skein256-160skein256-168skein256-176skein256-184skein256-192skein256-200skein256-208skein256-216skein256-224skein256-232skein256-240skein256-248skein256-256skein512-8skein512-16skein512-24skein512-32skein512-40skein512-48skein512-56skein512-64skein512-72skein512-80skein512-88skein512-96skein512-104skein512-112skein512-120skein512-128skein512-136skein512-144skein512-152skein512-160skein512-168skein512-176skein512-184skein512-192skein512-200skein512-208skein512-216skein512-224skein512-232skein512-240skein512-248skein512-256skein512-264skein512-272skein512-280skein512-288skein512-296skein512-304skein512-312skein512-320skein512-328skein512-336skein512-344skein512-352skein512-360skein512-368skein512-376skein512-384skein512-392skein512-400skein512-408skein512-416skein512-424skein512-432skein512-440skein512-448skein512-456skein512-464skein512-472skein512-480skein512-488skein512-496skein512-504skein512-512skein1024-8skein1024-16skein1024-24skein1024-32skein1024-40skein1024-48skein1024-56skein1024-64skein1024-72skein1024-80skein1024-88skein1024-96skein1024-104skein1024-112skein1024-120skein1024-128skein1024-136skein1024-144skein1024-152skein1024-160skein1024-168skein1024-176skein1024-184skein1024-192skein10
24-200skein1024-208skein1024-216skein1024-224skein1024-232skein1024-240skein1024-248skein1024-256skein1024-264skein1024-272skein1024-280skein1024-288skein1024-296skein1024-304skein1024-312skein1024-320skein1024-328skein1024-336skein1024-344skein1024-352skein1024-360skein1024-368skein1024-376skein1024-384skein1024-392skein1024-400skein1024-408skein1024-416skein1024-424skein1024-432skein1024-440skein1024-448skein1024-456skein1024-464skein1024-472skein1024-480skein1024-488skein1024-496skein1024-504skein1024-512skein1024-520skein1024-528skein1024-536skein1024-544skein1024-552skein1024-560skein1024-568skein1024-576skein1024-584skein1024-592skein1024-600skein1024-608skein1024-616skein1024-624skein1024-632skein1024-640skein1024-648skein1024-656skein1024-664skein1024-672skein1024-680skein1024-688skein1024-696skein1024-704skein1024-712skein1024-720skein1024-728skein1024-736skein1024-744skein1024-752skein1024-760skein1024-768skein1024-776skein1024-784skein1024-792skein1024-800skein1024-808skein1024-816skein1024-824skein1024-832skein1024-840skein1024-848skein1024-856skein1024-864skein1024-872skein1024-880skein1024-888skein1024-896skein1024-904skein1024-912skein1024-920skein1024-928skein1024-936skein1024-944skein1024-952skein1024-960skein1024-968skein1024-976skein1024-984skein1024-992skein1024-1000skein1024-1008skein1024-1016skein1024-1024xxh-32xxh-64xxh3-64xxh3-128poseidon-bls12_381-a2-fc1poseidon-bls12_381-a2-fc1-scurdca-2015-canonsszssz-sha2-256-bmtjson-jcsiscczeroxcert-imprint-256varsiges256kbls-12381-g1-sigbls-12381-g2-sigeddsaeip-191jwk_jcs-pubfil-commitment-unsealedfil-commitment-sealedplaintextv2holochain-adr-v0holochain-adr-v1holochain-key-v0holochain-key-v1holochain-sig-v0holochain-sig-v1skynet-nsarweave-nssubspace-nskumandra-nses256es284es512rs256" var _Code_map = map[Code]string{ 0: _Code_name[0:8], @@ -635,410 +645,420 @@ var _Code_map = map[Code]string{ 297: _Code_name[962:970], 301: _Code_name[970:973], 302: _Code_name[973:976], - 400: _Code_name[976:980], - 
406: _Code_name[980:986], - 421: _Code_name[986:989], - 443: _Code_name[989:994], - 444: _Code_name[994:999], - 445: _Code_name[999:1005], - 446: _Code_name[1005:1013], - 447: _Code_name[1013:1021], - 448: _Code_name[1021:1024], - 449: _Code_name[1024:1027], - 454: _Code_name[1027:1032], - 460: _Code_name[1032:1036], - 465: _Code_name[1036:1048], - 466: _Code_name[1048:1056], - 477: _Code_name[1056:1058], - 478: _Code_name[1058:1061], - 479: _Code_name[1061:1079], - 480: _Code_name[1079:1083], - 496: _Code_name[1083:1094], - 512: _Code_name[1094:1098], - 513: _Code_name[1098:1109], - 514: _Code_name[1109:1112], - 769: _Code_name[1112:1130], - 770: _Code_name[1130:1147], - 777: _Code_name[1147:1162], - 1024: _Code_name[1162:1178], - 1025: _Code_name[1178:1204], - 2304: _Code_name[1204:1221], - 2320: _Code_name[1221:1251], - 4114: _Code_name[1251:1275], - 4115: _Code_name[1275:1283], - 4116: _Code_name[1283:1295], - 4117: _Code_name[1295:1307], - 4130: _Code_name[1307:1322], - 4178: _Code_name[1322:1332], - 4179: _Code_name[1332:1342], - 4180: _Code_name[1342:1352], - 4181: _Code_name[1352:1362], - 4352: _Code_name[1362:1365], - 4608: _Code_name[1365:1373], - 4609: _Code_name[1373:1381], - 4610: _Code_name[1381:1389], - 4611: _Code_name[1389:1398], - 4612: _Code_name[1398:1406], - 4613: _Code_name[1406:1413], - 4614: _Code_name[1413:1420], - 4864: _Code_name[1420:1432], - 4865: _Code_name[1432:1446], - 4866: _Code_name[1446:1457], - 4869: _Code_name[1457:1465], - 7425: _Code_name[1465:1479], - 16194: _Code_name[1479:1489], - 21325: _Code_name[1489:1496], - 45569: _Code_name[1496:1505], - 45570: _Code_name[1505:1515], - 45571: _Code_name[1515:1525], - 45572: _Code_name[1525:1535], - 45573: _Code_name[1535:1545], - 45574: _Code_name[1545:1555], - 45575: _Code_name[1555:1565], - 45576: _Code_name[1565:1575], - 45577: _Code_name[1575:1585], - 45578: _Code_name[1585:1595], - 45579: _Code_name[1595:1605], - 45580: _Code_name[1605:1615], - 45581: _Code_name[1615:1626], - 
45582: _Code_name[1626:1637], - 45583: _Code_name[1637:1648], - 45584: _Code_name[1648:1659], - 45585: _Code_name[1659:1670], - 45586: _Code_name[1670:1681], - 45587: _Code_name[1681:1692], - 45588: _Code_name[1692:1703], - 45589: _Code_name[1703:1714], - 45590: _Code_name[1714:1725], - 45591: _Code_name[1725:1736], - 45592: _Code_name[1736:1747], - 45593: _Code_name[1747:1758], - 45594: _Code_name[1758:1769], - 45595: _Code_name[1769:1780], - 45596: _Code_name[1780:1791], - 45597: _Code_name[1791:1802], - 45598: _Code_name[1802:1813], - 45599: _Code_name[1813:1824], - 45600: _Code_name[1824:1835], - 45601: _Code_name[1835:1846], - 45602: _Code_name[1846:1857], - 45603: _Code_name[1857:1868], - 45604: _Code_name[1868:1879], - 45605: _Code_name[1879:1890], - 45606: _Code_name[1890:1901], - 45607: _Code_name[1901:1912], - 45608: _Code_name[1912:1923], - 45609: _Code_name[1923:1934], - 45610: _Code_name[1934:1945], - 45611: _Code_name[1945:1956], - 45612: _Code_name[1956:1967], - 45613: _Code_name[1967:1978], - 45614: _Code_name[1978:1989], - 45615: _Code_name[1989:2000], - 45616: _Code_name[2000:2011], - 45617: _Code_name[2011:2022], - 45618: _Code_name[2022:2033], - 45619: _Code_name[2033:2044], - 45620: _Code_name[2044:2055], - 45621: _Code_name[2055:2066], - 45622: _Code_name[2066:2077], - 45623: _Code_name[2077:2088], - 45624: _Code_name[2088:2099], - 45625: _Code_name[2099:2110], - 45626: _Code_name[2110:2121], - 45627: _Code_name[2121:2132], - 45628: _Code_name[2132:2143], - 45629: _Code_name[2143:2154], - 45630: _Code_name[2154:2165], - 45631: _Code_name[2165:2176], - 45632: _Code_name[2176:2187], - 45633: _Code_name[2187:2196], - 45634: _Code_name[2196:2206], - 45635: _Code_name[2206:2216], - 45636: _Code_name[2216:2226], - 45637: _Code_name[2226:2236], - 45638: _Code_name[2236:2246], - 45639: _Code_name[2246:2256], - 45640: _Code_name[2256:2266], - 45641: _Code_name[2266:2276], - 45642: _Code_name[2276:2286], - 45643: _Code_name[2286:2296], - 45644: 
_Code_name[2296:2306], - 45645: _Code_name[2306:2317], - 45646: _Code_name[2317:2328], - 45647: _Code_name[2328:2339], - 45648: _Code_name[2339:2350], - 45649: _Code_name[2350:2361], - 45650: _Code_name[2361:2372], - 45651: _Code_name[2372:2383], - 45652: _Code_name[2383:2394], - 45653: _Code_name[2394:2405], - 45654: _Code_name[2405:2416], - 45655: _Code_name[2416:2427], - 45656: _Code_name[2427:2438], - 45657: _Code_name[2438:2449], - 45658: _Code_name[2449:2460], - 45659: _Code_name[2460:2471], - 45660: _Code_name[2471:2482], - 45661: _Code_name[2482:2493], - 45662: _Code_name[2493:2504], - 45663: _Code_name[2504:2515], - 45664: _Code_name[2515:2526], - 45825: _Code_name[2526:2536], - 45826: _Code_name[2536:2547], - 45827: _Code_name[2547:2558], - 45828: _Code_name[2558:2569], - 45829: _Code_name[2569:2580], - 45830: _Code_name[2580:2591], - 45831: _Code_name[2591:2602], - 45832: _Code_name[2602:2613], - 45833: _Code_name[2613:2624], - 45834: _Code_name[2624:2635], - 45835: _Code_name[2635:2646], - 45836: _Code_name[2646:2657], - 45837: _Code_name[2657:2669], - 45838: _Code_name[2669:2681], - 45839: _Code_name[2681:2693], - 45840: _Code_name[2693:2705], - 45841: _Code_name[2705:2717], - 45842: _Code_name[2717:2729], - 45843: _Code_name[2729:2741], - 45844: _Code_name[2741:2753], - 45845: _Code_name[2753:2765], - 45846: _Code_name[2765:2777], - 45847: _Code_name[2777:2789], - 45848: _Code_name[2789:2801], - 45849: _Code_name[2801:2813], - 45850: _Code_name[2813:2825], - 45851: _Code_name[2825:2837], - 45852: _Code_name[2837:2849], - 45853: _Code_name[2849:2861], - 45854: _Code_name[2861:2873], - 45855: _Code_name[2873:2885], - 45856: _Code_name[2885:2897], - 45857: _Code_name[2897:2907], - 45858: _Code_name[2907:2918], - 45859: _Code_name[2918:2929], - 45860: _Code_name[2929:2940], - 45861: _Code_name[2940:2951], - 45862: _Code_name[2951:2962], - 45863: _Code_name[2962:2973], - 45864: _Code_name[2973:2984], - 45865: _Code_name[2984:2995], - 45866: 
_Code_name[2995:3006], - 45867: _Code_name[3006:3017], - 45868: _Code_name[3017:3028], - 45869: _Code_name[3028:3040], - 45870: _Code_name[3040:3052], - 45871: _Code_name[3052:3064], - 45872: _Code_name[3064:3076], - 45873: _Code_name[3076:3088], - 45874: _Code_name[3088:3100], - 45875: _Code_name[3100:3112], - 45876: _Code_name[3112:3124], - 45877: _Code_name[3124:3136], - 45878: _Code_name[3136:3148], - 45879: _Code_name[3148:3160], - 45880: _Code_name[3160:3172], - 45881: _Code_name[3172:3184], - 45882: _Code_name[3184:3196], - 45883: _Code_name[3196:3208], - 45884: _Code_name[3208:3220], - 45885: _Code_name[3220:3232], - 45886: _Code_name[3232:3244], - 45887: _Code_name[3244:3256], - 45888: _Code_name[3256:3268], - 45889: _Code_name[3268:3280], - 45890: _Code_name[3280:3292], - 45891: _Code_name[3292:3304], - 45892: _Code_name[3304:3316], - 45893: _Code_name[3316:3328], - 45894: _Code_name[3328:3340], - 45895: _Code_name[3340:3352], - 45896: _Code_name[3352:3364], - 45897: _Code_name[3364:3376], - 45898: _Code_name[3376:3388], - 45899: _Code_name[3388:3400], - 45900: _Code_name[3400:3412], - 45901: _Code_name[3412:3424], - 45902: _Code_name[3424:3436], - 45903: _Code_name[3436:3448], - 45904: _Code_name[3448:3460], - 45905: _Code_name[3460:3472], - 45906: _Code_name[3472:3484], - 45907: _Code_name[3484:3496], - 45908: _Code_name[3496:3508], - 45909: _Code_name[3508:3520], - 45910: _Code_name[3520:3532], - 45911: _Code_name[3532:3544], - 45912: _Code_name[3544:3556], - 45913: _Code_name[3556:3568], - 45914: _Code_name[3568:3580], - 45915: _Code_name[3580:3592], - 45916: _Code_name[3592:3604], - 45917: _Code_name[3604:3616], - 45918: _Code_name[3616:3628], - 45919: _Code_name[3628:3640], - 45920: _Code_name[3640:3652], - 45921: _Code_name[3652:3663], - 45922: _Code_name[3663:3675], - 45923: _Code_name[3675:3687], - 45924: _Code_name[3687:3699], - 45925: _Code_name[3699:3711], - 45926: _Code_name[3711:3723], - 45927: _Code_name[3723:3735], - 45928: 
_Code_name[3735:3747], - 45929: _Code_name[3747:3759], - 45930: _Code_name[3759:3771], - 45931: _Code_name[3771:3783], - 45932: _Code_name[3783:3795], - 45933: _Code_name[3795:3808], - 45934: _Code_name[3808:3821], - 45935: _Code_name[3821:3834], - 45936: _Code_name[3834:3847], - 45937: _Code_name[3847:3860], - 45938: _Code_name[3860:3873], - 45939: _Code_name[3873:3886], - 45940: _Code_name[3886:3899], - 45941: _Code_name[3899:3912], - 45942: _Code_name[3912:3925], - 45943: _Code_name[3925:3938], - 45944: _Code_name[3938:3951], - 45945: _Code_name[3951:3964], - 45946: _Code_name[3964:3977], - 45947: _Code_name[3977:3990], - 45948: _Code_name[3990:4003], - 45949: _Code_name[4003:4016], - 45950: _Code_name[4016:4029], - 45951: _Code_name[4029:4042], - 45952: _Code_name[4042:4055], - 45953: _Code_name[4055:4068], - 45954: _Code_name[4068:4081], - 45955: _Code_name[4081:4094], - 45956: _Code_name[4094:4107], - 45957: _Code_name[4107:4120], - 45958: _Code_name[4120:4133], - 45959: _Code_name[4133:4146], - 45960: _Code_name[4146:4159], - 45961: _Code_name[4159:4172], - 45962: _Code_name[4172:4185], - 45963: _Code_name[4185:4198], - 45964: _Code_name[4198:4211], - 45965: _Code_name[4211:4224], - 45966: _Code_name[4224:4237], - 45967: _Code_name[4237:4250], - 45968: _Code_name[4250:4263], - 45969: _Code_name[4263:4276], - 45970: _Code_name[4276:4289], - 45971: _Code_name[4289:4302], - 45972: _Code_name[4302:4315], - 45973: _Code_name[4315:4328], - 45974: _Code_name[4328:4341], - 45975: _Code_name[4341:4354], - 45976: _Code_name[4354:4367], - 45977: _Code_name[4367:4380], - 45978: _Code_name[4380:4393], - 45979: _Code_name[4393:4406], - 45980: _Code_name[4406:4419], - 45981: _Code_name[4419:4432], - 45982: _Code_name[4432:4445], - 45983: _Code_name[4445:4458], - 45984: _Code_name[4458:4471], - 45985: _Code_name[4471:4484], - 45986: _Code_name[4484:4497], - 45987: _Code_name[4497:4510], - 45988: _Code_name[4510:4523], - 45989: _Code_name[4523:4536], - 45990: 
_Code_name[4536:4549], - 45991: _Code_name[4549:4562], - 45992: _Code_name[4562:4575], - 45993: _Code_name[4575:4588], - 45994: _Code_name[4588:4601], - 45995: _Code_name[4601:4614], - 45996: _Code_name[4614:4627], - 45997: _Code_name[4627:4640], - 45998: _Code_name[4640:4653], - 45999: _Code_name[4653:4666], - 46000: _Code_name[4666:4679], - 46001: _Code_name[4679:4692], - 46002: _Code_name[4692:4705], - 46003: _Code_name[4705:4718], - 46004: _Code_name[4718:4731], - 46005: _Code_name[4731:4744], - 46006: _Code_name[4744:4757], - 46007: _Code_name[4757:4770], - 46008: _Code_name[4770:4783], - 46009: _Code_name[4783:4796], - 46010: _Code_name[4796:4809], - 46011: _Code_name[4809:4822], - 46012: _Code_name[4822:4835], - 46013: _Code_name[4835:4848], - 46014: _Code_name[4848:4861], - 46015: _Code_name[4861:4874], - 46016: _Code_name[4874:4887], - 46017: _Code_name[4887:4900], - 46018: _Code_name[4900:4913], - 46019: _Code_name[4913:4926], - 46020: _Code_name[4926:4939], - 46021: _Code_name[4939:4952], - 46022: _Code_name[4952:4965], - 46023: _Code_name[4965:4978], - 46024: _Code_name[4978:4991], - 46025: _Code_name[4991:5004], - 46026: _Code_name[5004:5017], - 46027: _Code_name[5017:5030], - 46028: _Code_name[5030:5043], - 46029: _Code_name[5043:5056], - 46030: _Code_name[5056:5069], - 46031: _Code_name[5069:5082], - 46032: _Code_name[5082:5095], - 46033: _Code_name[5095:5108], - 46034: _Code_name[5108:5121], - 46035: _Code_name[5121:5134], - 46036: _Code_name[5134:5147], - 46037: _Code_name[5147:5160], - 46038: _Code_name[5160:5173], - 46039: _Code_name[5173:5186], - 46040: _Code_name[5186:5199], - 46041: _Code_name[5199:5212], - 46042: _Code_name[5212:5225], - 46043: _Code_name[5225:5238], - 46044: _Code_name[5238:5251], - 46045: _Code_name[5251:5265], - 46046: _Code_name[5265:5279], - 46047: _Code_name[5279:5293], - 46048: _Code_name[5293:5307], - 46081: _Code_name[5307:5332], - 46082: _Code_name[5332:5360], - 46083: _Code_name[5360:5376], - 46337: 
_Code_name[5376:5379], - 46338: _Code_name[5379:5395], - 46593: _Code_name[5395:5403], - 52225: _Code_name[5403:5407], - 52753: _Code_name[5407:5428], - 53248: _Code_name[5428:5434], - 53479: _Code_name[5434:5440], - 53482: _Code_name[5440:5456], - 53483: _Code_name[5456:5472], - 53485: _Code_name[5472:5477], - 53649: _Code_name[5477:5484], - 61697: _Code_name[5484:5507], - 61698: _Code_name[5507:5528], - 7367777: _Code_name[5528:5539], - 8417572: _Code_name[5539:5555], - 8483108: _Code_name[5555:5571], - 9728292: _Code_name[5571:5587], - 9793828: _Code_name[5587:5603], - 10645796: _Code_name[5603:5619], - 10711332: _Code_name[5619:5635], - 11639056: _Code_name[5635:5644], - 11704592: _Code_name[5644:5654], - 11770128: _Code_name[5654:5665], - 11835664: _Code_name[5665:5676], - 13636096: _Code_name[5676:5681], - 13636097: _Code_name[5681:5686], - 13636098: _Code_name[5686:5691], - 13636101: _Code_name[5691:5696], + 306: _Code_name[976:981], + 356: _Code_name[981:991], + 400: _Code_name[991:995], + 406: _Code_name[995:1001], + 421: _Code_name[1001:1004], + 443: _Code_name[1004:1009], + 444: _Code_name[1009:1014], + 445: _Code_name[1014:1020], + 446: _Code_name[1020:1028], + 447: _Code_name[1028:1036], + 448: _Code_name[1036:1039], + 449: _Code_name[1039:1042], + 454: _Code_name[1042:1047], + 460: _Code_name[1047:1051], + 461: _Code_name[1051:1058], + 465: _Code_name[1058:1070], + 466: _Code_name[1070:1078], + 477: _Code_name[1078:1080], + 478: _Code_name[1080:1083], + 479: _Code_name[1083:1101], + 480: _Code_name[1101:1105], + 496: _Code_name[1105:1116], + 512: _Code_name[1116:1120], + 513: _Code_name[1120:1131], + 514: _Code_name[1131:1134], + 768: _Code_name[1134:1145], + 769: _Code_name[1145:1163], + 770: _Code_name[1163:1180], + 777: _Code_name[1180:1195], + 1024: _Code_name[1195:1211], + 1025: _Code_name[1211:1237], + 2304: _Code_name[1237:1254], + 2320: _Code_name[1254:1284], + 3357: _Code_name[1284:1292], + 4114: _Code_name[1292:1316], + 4115: 
_Code_name[1316:1324], + 4116: _Code_name[1324:1336], + 4117: _Code_name[1336:1348], + 4130: _Code_name[1348:1363], + 4178: _Code_name[1363:1373], + 4179: _Code_name[1373:1383], + 4180: _Code_name[1383:1393], + 4181: _Code_name[1393:1403], + 4352: _Code_name[1403:1406], + 4608: _Code_name[1406:1414], + 4609: _Code_name[1414:1422], + 4610: _Code_name[1422:1430], + 4611: _Code_name[1430:1439], + 4612: _Code_name[1439:1447], + 4613: _Code_name[1447:1454], + 4614: _Code_name[1454:1461], + 4864: _Code_name[1461:1473], + 4865: _Code_name[1473:1487], + 4866: _Code_name[1487:1498], + 4869: _Code_name[1498:1506], + 7425: _Code_name[1506:1520], + 16194: _Code_name[1520:1530], + 21325: _Code_name[1530:1537], + 45569: _Code_name[1537:1546], + 45570: _Code_name[1546:1556], + 45571: _Code_name[1556:1566], + 45572: _Code_name[1566:1576], + 45573: _Code_name[1576:1586], + 45574: _Code_name[1586:1596], + 45575: _Code_name[1596:1606], + 45576: _Code_name[1606:1616], + 45577: _Code_name[1616:1626], + 45578: _Code_name[1626:1636], + 45579: _Code_name[1636:1646], + 45580: _Code_name[1646:1656], + 45581: _Code_name[1656:1667], + 45582: _Code_name[1667:1678], + 45583: _Code_name[1678:1689], + 45584: _Code_name[1689:1700], + 45585: _Code_name[1700:1711], + 45586: _Code_name[1711:1722], + 45587: _Code_name[1722:1733], + 45588: _Code_name[1733:1744], + 45589: _Code_name[1744:1755], + 45590: _Code_name[1755:1766], + 45591: _Code_name[1766:1777], + 45592: _Code_name[1777:1788], + 45593: _Code_name[1788:1799], + 45594: _Code_name[1799:1810], + 45595: _Code_name[1810:1821], + 45596: _Code_name[1821:1832], + 45597: _Code_name[1832:1843], + 45598: _Code_name[1843:1854], + 45599: _Code_name[1854:1865], + 45600: _Code_name[1865:1876], + 45601: _Code_name[1876:1887], + 45602: _Code_name[1887:1898], + 45603: _Code_name[1898:1909], + 45604: _Code_name[1909:1920], + 45605: _Code_name[1920:1931], + 45606: _Code_name[1931:1942], + 45607: _Code_name[1942:1953], + 45608: _Code_name[1953:1964], + 45609: 
_Code_name[1964:1975], + 45610: _Code_name[1975:1986], + 45611: _Code_name[1986:1997], + 45612: _Code_name[1997:2008], + 45613: _Code_name[2008:2019], + 45614: _Code_name[2019:2030], + 45615: _Code_name[2030:2041], + 45616: _Code_name[2041:2052], + 45617: _Code_name[2052:2063], + 45618: _Code_name[2063:2074], + 45619: _Code_name[2074:2085], + 45620: _Code_name[2085:2096], + 45621: _Code_name[2096:2107], + 45622: _Code_name[2107:2118], + 45623: _Code_name[2118:2129], + 45624: _Code_name[2129:2140], + 45625: _Code_name[2140:2151], + 45626: _Code_name[2151:2162], + 45627: _Code_name[2162:2173], + 45628: _Code_name[2173:2184], + 45629: _Code_name[2184:2195], + 45630: _Code_name[2195:2206], + 45631: _Code_name[2206:2217], + 45632: _Code_name[2217:2228], + 45633: _Code_name[2228:2237], + 45634: _Code_name[2237:2247], + 45635: _Code_name[2247:2257], + 45636: _Code_name[2257:2267], + 45637: _Code_name[2267:2277], + 45638: _Code_name[2277:2287], + 45639: _Code_name[2287:2297], + 45640: _Code_name[2297:2307], + 45641: _Code_name[2307:2317], + 45642: _Code_name[2317:2327], + 45643: _Code_name[2327:2337], + 45644: _Code_name[2337:2347], + 45645: _Code_name[2347:2358], + 45646: _Code_name[2358:2369], + 45647: _Code_name[2369:2380], + 45648: _Code_name[2380:2391], + 45649: _Code_name[2391:2402], + 45650: _Code_name[2402:2413], + 45651: _Code_name[2413:2424], + 45652: _Code_name[2424:2435], + 45653: _Code_name[2435:2446], + 45654: _Code_name[2446:2457], + 45655: _Code_name[2457:2468], + 45656: _Code_name[2468:2479], + 45657: _Code_name[2479:2490], + 45658: _Code_name[2490:2501], + 45659: _Code_name[2501:2512], + 45660: _Code_name[2512:2523], + 45661: _Code_name[2523:2534], + 45662: _Code_name[2534:2545], + 45663: _Code_name[2545:2556], + 45664: _Code_name[2556:2567], + 45825: _Code_name[2567:2577], + 45826: _Code_name[2577:2588], + 45827: _Code_name[2588:2599], + 45828: _Code_name[2599:2610], + 45829: _Code_name[2610:2621], + 45830: _Code_name[2621:2632], + 45831: 
_Code_name[2632:2643], + 45832: _Code_name[2643:2654], + 45833: _Code_name[2654:2665], + 45834: _Code_name[2665:2676], + 45835: _Code_name[2676:2687], + 45836: _Code_name[2687:2698], + 45837: _Code_name[2698:2710], + 45838: _Code_name[2710:2722], + 45839: _Code_name[2722:2734], + 45840: _Code_name[2734:2746], + 45841: _Code_name[2746:2758], + 45842: _Code_name[2758:2770], + 45843: _Code_name[2770:2782], + 45844: _Code_name[2782:2794], + 45845: _Code_name[2794:2806], + 45846: _Code_name[2806:2818], + 45847: _Code_name[2818:2830], + 45848: _Code_name[2830:2842], + 45849: _Code_name[2842:2854], + 45850: _Code_name[2854:2866], + 45851: _Code_name[2866:2878], + 45852: _Code_name[2878:2890], + 45853: _Code_name[2890:2902], + 45854: _Code_name[2902:2914], + 45855: _Code_name[2914:2926], + 45856: _Code_name[2926:2938], + 45857: _Code_name[2938:2948], + 45858: _Code_name[2948:2959], + 45859: _Code_name[2959:2970], + 45860: _Code_name[2970:2981], + 45861: _Code_name[2981:2992], + 45862: _Code_name[2992:3003], + 45863: _Code_name[3003:3014], + 45864: _Code_name[3014:3025], + 45865: _Code_name[3025:3036], + 45866: _Code_name[3036:3047], + 45867: _Code_name[3047:3058], + 45868: _Code_name[3058:3069], + 45869: _Code_name[3069:3081], + 45870: _Code_name[3081:3093], + 45871: _Code_name[3093:3105], + 45872: _Code_name[3105:3117], + 45873: _Code_name[3117:3129], + 45874: _Code_name[3129:3141], + 45875: _Code_name[3141:3153], + 45876: _Code_name[3153:3165], + 45877: _Code_name[3165:3177], + 45878: _Code_name[3177:3189], + 45879: _Code_name[3189:3201], + 45880: _Code_name[3201:3213], + 45881: _Code_name[3213:3225], + 45882: _Code_name[3225:3237], + 45883: _Code_name[3237:3249], + 45884: _Code_name[3249:3261], + 45885: _Code_name[3261:3273], + 45886: _Code_name[3273:3285], + 45887: _Code_name[3285:3297], + 45888: _Code_name[3297:3309], + 45889: _Code_name[3309:3321], + 45890: _Code_name[3321:3333], + 45891: _Code_name[3333:3345], + 45892: _Code_name[3345:3357], + 45893: 
_Code_name[3357:3369], + 45894: _Code_name[3369:3381], + 45895: _Code_name[3381:3393], + 45896: _Code_name[3393:3405], + 45897: _Code_name[3405:3417], + 45898: _Code_name[3417:3429], + 45899: _Code_name[3429:3441], + 45900: _Code_name[3441:3453], + 45901: _Code_name[3453:3465], + 45902: _Code_name[3465:3477], + 45903: _Code_name[3477:3489], + 45904: _Code_name[3489:3501], + 45905: _Code_name[3501:3513], + 45906: _Code_name[3513:3525], + 45907: _Code_name[3525:3537], + 45908: _Code_name[3537:3549], + 45909: _Code_name[3549:3561], + 45910: _Code_name[3561:3573], + 45911: _Code_name[3573:3585], + 45912: _Code_name[3585:3597], + 45913: _Code_name[3597:3609], + 45914: _Code_name[3609:3621], + 45915: _Code_name[3621:3633], + 45916: _Code_name[3633:3645], + 45917: _Code_name[3645:3657], + 45918: _Code_name[3657:3669], + 45919: _Code_name[3669:3681], + 45920: _Code_name[3681:3693], + 45921: _Code_name[3693:3704], + 45922: _Code_name[3704:3716], + 45923: _Code_name[3716:3728], + 45924: _Code_name[3728:3740], + 45925: _Code_name[3740:3752], + 45926: _Code_name[3752:3764], + 45927: _Code_name[3764:3776], + 45928: _Code_name[3776:3788], + 45929: _Code_name[3788:3800], + 45930: _Code_name[3800:3812], + 45931: _Code_name[3812:3824], + 45932: _Code_name[3824:3836], + 45933: _Code_name[3836:3849], + 45934: _Code_name[3849:3862], + 45935: _Code_name[3862:3875], + 45936: _Code_name[3875:3888], + 45937: _Code_name[3888:3901], + 45938: _Code_name[3901:3914], + 45939: _Code_name[3914:3927], + 45940: _Code_name[3927:3940], + 45941: _Code_name[3940:3953], + 45942: _Code_name[3953:3966], + 45943: _Code_name[3966:3979], + 45944: _Code_name[3979:3992], + 45945: _Code_name[3992:4005], + 45946: _Code_name[4005:4018], + 45947: _Code_name[4018:4031], + 45948: _Code_name[4031:4044], + 45949: _Code_name[4044:4057], + 45950: _Code_name[4057:4070], + 45951: _Code_name[4070:4083], + 45952: _Code_name[4083:4096], + 45953: _Code_name[4096:4109], + 45954: _Code_name[4109:4122], + 45955: 
_Code_name[4122:4135], + 45956: _Code_name[4135:4148], + 45957: _Code_name[4148:4161], + 45958: _Code_name[4161:4174], + 45959: _Code_name[4174:4187], + 45960: _Code_name[4187:4200], + 45961: _Code_name[4200:4213], + 45962: _Code_name[4213:4226], + 45963: _Code_name[4226:4239], + 45964: _Code_name[4239:4252], + 45965: _Code_name[4252:4265], + 45966: _Code_name[4265:4278], + 45967: _Code_name[4278:4291], + 45968: _Code_name[4291:4304], + 45969: _Code_name[4304:4317], + 45970: _Code_name[4317:4330], + 45971: _Code_name[4330:4343], + 45972: _Code_name[4343:4356], + 45973: _Code_name[4356:4369], + 45974: _Code_name[4369:4382], + 45975: _Code_name[4382:4395], + 45976: _Code_name[4395:4408], + 45977: _Code_name[4408:4421], + 45978: _Code_name[4421:4434], + 45979: _Code_name[4434:4447], + 45980: _Code_name[4447:4460], + 45981: _Code_name[4460:4473], + 45982: _Code_name[4473:4486], + 45983: _Code_name[4486:4499], + 45984: _Code_name[4499:4512], + 45985: _Code_name[4512:4525], + 45986: _Code_name[4525:4538], + 45987: _Code_name[4538:4551], + 45988: _Code_name[4551:4564], + 45989: _Code_name[4564:4577], + 45990: _Code_name[4577:4590], + 45991: _Code_name[4590:4603], + 45992: _Code_name[4603:4616], + 45993: _Code_name[4616:4629], + 45994: _Code_name[4629:4642], + 45995: _Code_name[4642:4655], + 45996: _Code_name[4655:4668], + 45997: _Code_name[4668:4681], + 45998: _Code_name[4681:4694], + 45999: _Code_name[4694:4707], + 46000: _Code_name[4707:4720], + 46001: _Code_name[4720:4733], + 46002: _Code_name[4733:4746], + 46003: _Code_name[4746:4759], + 46004: _Code_name[4759:4772], + 46005: _Code_name[4772:4785], + 46006: _Code_name[4785:4798], + 46007: _Code_name[4798:4811], + 46008: _Code_name[4811:4824], + 46009: _Code_name[4824:4837], + 46010: _Code_name[4837:4850], + 46011: _Code_name[4850:4863], + 46012: _Code_name[4863:4876], + 46013: _Code_name[4876:4889], + 46014: _Code_name[4889:4902], + 46015: _Code_name[4902:4915], + 46016: _Code_name[4915:4928], + 46017: 
_Code_name[4928:4941], + 46018: _Code_name[4941:4954], + 46019: _Code_name[4954:4967], + 46020: _Code_name[4967:4980], + 46021: _Code_name[4980:4993], + 46022: _Code_name[4993:5006], + 46023: _Code_name[5006:5019], + 46024: _Code_name[5019:5032], + 46025: _Code_name[5032:5045], + 46026: _Code_name[5045:5058], + 46027: _Code_name[5058:5071], + 46028: _Code_name[5071:5084], + 46029: _Code_name[5084:5097], + 46030: _Code_name[5097:5110], + 46031: _Code_name[5110:5123], + 46032: _Code_name[5123:5136], + 46033: _Code_name[5136:5149], + 46034: _Code_name[5149:5162], + 46035: _Code_name[5162:5175], + 46036: _Code_name[5175:5188], + 46037: _Code_name[5188:5201], + 46038: _Code_name[5201:5214], + 46039: _Code_name[5214:5227], + 46040: _Code_name[5227:5240], + 46041: _Code_name[5240:5253], + 46042: _Code_name[5253:5266], + 46043: _Code_name[5266:5279], + 46044: _Code_name[5279:5292], + 46045: _Code_name[5292:5306], + 46046: _Code_name[5306:5320], + 46047: _Code_name[5320:5334], + 46048: _Code_name[5334:5348], + 46049: _Code_name[5348:5354], + 46050: _Code_name[5354:5360], + 46051: _Code_name[5360:5367], + 46052: _Code_name[5367:5375], + 46081: _Code_name[5375:5400], + 46082: _Code_name[5400:5428], + 46083: _Code_name[5428:5444], + 46337: _Code_name[5444:5447], + 46338: _Code_name[5447:5463], + 46593: _Code_name[5463:5471], + 52225: _Code_name[5471:5475], + 52753: _Code_name[5475:5496], + 53248: _Code_name[5496:5502], + 53479: _Code_name[5502:5508], + 53482: _Code_name[5508:5524], + 53483: _Code_name[5524:5540], + 53485: _Code_name[5540:5545], + 53649: _Code_name[5545:5552], + 60241: _Code_name[5552:5563], + 61697: _Code_name[5563:5586], + 61698: _Code_name[5586:5607], + 7367777: _Code_name[5607:5618], + 8417572: _Code_name[5618:5634], + 8483108: _Code_name[5634:5650], + 9728292: _Code_name[5650:5666], + 9793828: _Code_name[5666:5682], + 10645796: _Code_name[5682:5698], + 10711332: _Code_name[5698:5714], + 11639056: _Code_name[5714:5723], + 11704592: _Code_name[5723:5733], + 
11770128: _Code_name[5733:5744], + 11835664: _Code_name[5744:5755], + 13636096: _Code_name[5755:5760], + 13636097: _Code_name[5760:5765], + 13636098: _Code_name[5765:5770], + 13636101: _Code_name[5770:5775], } func (i Code) String() string { diff --git a/vendor/github.com/multiformats/go-multicodec/code_table.go b/vendor/github.com/multiformats/go-multicodec/code_table.go index 59f72436..edadd4cd 100644 --- a/vendor/github.com/multiformats/go-multicodec/code_table.go +++ b/vendor/github.com/multiformats/go-multicodec/code_table.go @@ -69,10 +69,10 @@ const ( // Dccp is a draft code tagged "multiaddr". Dccp Code = 0x21 // dccp - // Murmur3X64_64 is a permanent code tagged "multihash" and described by: The first 64-bits of a murmur3-x64-128 - used for UnixFS directory sharding.. + // Murmur3X64_64 is a permanent code tagged "hash" and described by: The first 64-bits of a murmur3-x64-128 - used for UnixFS directory sharding.. Murmur3X64_64 Code = 0x22 // murmur3-x64-64 - // Murmur3_32 is a draft code tagged "multihash". + // Murmur3_32 is a draft code tagged "hash". Murmur3_32 Code = 0x23 // murmur3-32 // Ip6 is a permanent code tagged "multiaddr". @@ -330,6 +330,12 @@ const ( // Utp is a draft code tagged "multiaddr". Utp Code = 0x012e // utp + // Crc32 is a draft code tagged "hash" and described by: CRC-32 non-cryptographic hash algorithm (IEEE 802.3). + Crc32 Code = 0x0132 // crc32 + + // Crc64Ecma is a draft code tagged "hash" and described by: CRC-64 non-cryptographic hash algorithm (ECMA-182 - Annex B). + Crc64Ecma Code = 0x0164 // crc64-ecma + // Unix is a permanent code tagged "multiaddr". Unix Code = 0x0190 // unix @@ -366,6 +372,9 @@ const ( // Quic is a permanent code tagged "multiaddr". Quic Code = 0x01cc // quic + // QuicV1 is a permanent code tagged "multiaddr". + QuicV1 Code = 0x01cd // quic-v1 + // Webtransport is a draft code tagged "multiaddr". 
Webtransport Code = 0x01d1 // webtransport @@ -396,6 +405,9 @@ const ( // Car is a draft code tagged "serialization" and described by: Content Addressable aRchive (CAR). Car Code = 0x0202 // car + // IpnsRecord is a permanent code tagged "serialization" and described by: Signed IPNS Record. + IpnsRecord Code = 0x0300 // ipns-record + // Libp2pPeerRecord is a permanent code tagged "libp2p" and described by: libp2p peer record type. Libp2pPeerRecord Code = 0x0301 // libp2p-peer-record @@ -417,6 +429,9 @@ const ( // TransportGraphsyncFilecoinv1 is a draft code tagged "transport" and described by: Filecoin graphsync datatransfer. TransportGraphsyncFilecoinv1 Code = 0x0910 // transport-graphsync-filecoinv1 + // Multidid is a draft code tagged "multiformat" and described by: Compact encoding for Decentralized Identifers. + Multidid Code = 0x0d1d // multidid + // Sha2_256Trunc254Padded is a permanent code tagged "multihash" and described by: SHA2-256 with the two most significant bits from the last byte zeroed (as via a mask with 0b00111111) - used for proving trees as in Filecoin. Sha2_256Trunc254Padded Code = 0x1012 // sha2-256-trunc254-padded @@ -429,7 +444,7 @@ const ( // Sha2_512_256 is a permanent code tagged "multihash" and described by: aka SHA-512/256; as specified by FIPS 180-4.. Sha2_512_256 Code = 0x1015 // sha2-512-256 - // Murmur3X64_128 is a draft code tagged "multihash". + // Murmur3X64_128 is a draft code tagged "hash". Murmur3X64_128 Code = 0x1022 // murmur3-x64-128 // Ripemd128 is a draft code tagged "multihash". @@ -1449,6 +1464,18 @@ const ( // Skein1024_1024 is a draft code tagged "multihash". Skein1024_1024 Code = 0xb3e0 // skein1024-1024 + // Xxh32 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm. + Xxh32 Code = 0xb3e1 // xxh-32 + + // Xxh64 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm. 
+ Xxh64 Code = 0xb3e2 // xxh-64 + + // Xxh3_64 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm. + Xxh3_64 Code = 0xb3e3 // xxh3-64 + + // Xxh3_128 is a draft code tagged "hash" and described by: Extremely fast non-cryptographic hash algorithm. + Xxh3_128 Code = 0xb3e4 // xxh3-128 + // PoseidonBls12_381A2Fc1 is a permanent code tagged "multihash" and described by: Poseidon using BLS12-381 and arity of 2 with Filecoin parameters. PoseidonBls12_381A2Fc1 Code = 0xb401 // poseidon-bls12_381-a2-fc1 @@ -1491,6 +1518,9 @@ const ( // Eip191 is a draft code tagged "varsig" and described by: EIP-191 Ethereum Signed Data Standard. Eip191 Code = 0xd191 // eip-191 + // Jwk_jcsPub is a draft code tagged "key" and described by: JSON object containing only the required members of a JWK (RFC 7518 and RFC 7517) representing the public key. Serialisation based on JCS (RFC 8785). + Jwk_jcsPub Code = 0xeb51 // jwk_jcs-pub + // FilCommitmentUnsealed is a permanent code tagged "filecoin" and described by: Filecoin piece or sector data commitment merkle node/root (CommP & CommD). 
FilCommitmentUnsealed Code = 0xf101 // fil-commitment-unsealed @@ -1653,6 +1683,8 @@ var knownCodes = []Code{ DagJson, Udt, Utp, + Crc32, + Crc64Ecma, Unix, Thread, P2p, @@ -1665,6 +1697,7 @@ var knownCodes = []Code{ Sni, Noise, Quic, + QuicV1, Webtransport, Certhash, Ws, @@ -1675,6 +1708,7 @@ var knownCodes = []Code{ Json, Messagepack, Car, + IpnsRecord, Libp2pPeerRecord, Libp2pRelayRsvp, Memorytransport, @@ -1682,6 +1716,7 @@ var knownCodes = []Code{ CarMultihashIndexSorted, TransportBitswap, TransportGraphsyncFilecoinv1, + Multidid, Sha2_256Trunc254Padded, Sha2_224, Sha2_512_224, @@ -2026,6 +2061,10 @@ var knownCodes = []Code{ Skein1024_1008, Skein1024_1016, Skein1024_1024, + Xxh32, + Xxh64, + Xxh3_64, + Xxh3_128, PoseidonBls12_381A2Fc1, PoseidonBls12_381A2Fc1Sc, Urdca2015Canon, @@ -2040,6 +2079,7 @@ var knownCodes = []Code{ Bls12381G2Sig, Eddsa, Eip191, + Jwk_jcsPub, FilCommitmentUnsealed, FilCommitmentSealed, Plaintextv2, @@ -2070,6 +2110,17 @@ func (c Code) Tag() string { FilCommitmentSealed: return "filecoin" + case Murmur3X64_64, + Murmur3_32, + Crc32, + Crc64Ecma, + Murmur3X64_128, + Xxh32, + Xxh64, + Xxh3_64, + Xxh3_128: + return "hash" + case HolochainAdrV0, HolochainAdrV1, HolochainKeyV0, @@ -2144,7 +2195,8 @@ func (c Code) Tag() string { Ed25519Priv, Secp256k1Priv, X25519Priv, - RsaPriv: + RsaPriv, + Jwk_jcsPub: return "key" case Libp2pPeerRecord, @@ -2183,6 +2235,7 @@ func (c Code) Tag() string { Sni, Noise, Quic, + QuicV1, Webtransport, Certhash, Ws, @@ -2197,7 +2250,8 @@ func (c Code) Tag() string { Multihash, Multiaddr, Multibase, - Caip50: + Caip50, + Multidid: return "multiformat" case Identity, @@ -2216,8 +2270,6 @@ func (c Code) Tag() string { Keccak512, Blake3, Sha2_384, - Murmur3X64_64, - Murmur3_32, DblSha2_256, Md4, Md5, @@ -2225,7 +2277,6 @@ func (c Code) Tag() string { Sha2_224, Sha2_512_224, Sha2_512_256, - Murmur3X64_128, Ripemd128, Ripemd160, Ripemd256, @@ -2577,6 +2628,7 @@ func (c Code) Tag() string { Bencode, Messagepack, Car, + 
IpnsRecord, CarIndexSorted, CarMultihashIndexSorted, Ssz: diff --git a/vendor/github.com/multiformats/go-multicodec/version.json b/vendor/github.com/multiformats/go-multicodec/version.json index 9f6d6fca..8047016a 100644 --- a/vendor/github.com/multiformats/go-multicodec/version.json +++ b/vendor/github.com/multiformats/go-multicodec/version.json @@ -1,3 +1,3 @@ { - "version": "v0.7.0" + "version": "v0.8.1" } diff --git a/vendor/github.com/multiformats/go-multistream/.gitignore b/vendor/github.com/multiformats/go-multistream/.gitignore deleted file mode 100644 index 29585fe7..00000000 --- a/vendor/github.com/multiformats/go-multistream/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*-fuzz.zip diff --git a/vendor/github.com/multiformats/go-multistream/README.md b/vendor/github.com/multiformats/go-multistream/README.md index 9666d204..f766b693 100644 --- a/vendor/github.com/multiformats/go-multistream/README.md +++ b/vendor/github.com/multiformats/go-multistream/README.md @@ -53,7 +53,7 @@ import ( // "/cats" and "/dogs" and exposes it on a localhost:8765. It then opens connections // to that port, selects the protocols and tests that the handlers are working. func main() { - mux := ms.NewMultistreamMuxer() + mux := ms.NewMultistreamMuxer[string]() mux.AddHandler("/cats", func(proto string, rwc io.ReadWriteCloser) error { fmt.Fprintln(rwc, proto, ": HELLO I LIKE CATS") return rwc.Close() diff --git a/vendor/github.com/multiformats/go-multistream/client.go b/vendor/github.com/multiformats/go-multistream/client.go index 811e3b39..013dd5ab 100644 --- a/vendor/github.com/multiformats/go-multistream/client.go +++ b/vendor/github.com/multiformats/go-multistream/client.go @@ -13,9 +13,22 @@ import ( "strings" ) -// ErrNotSupported is the error returned when the muxer does not support -// the protocol specified for the handshake. 
-var ErrNotSupported = errors.New("protocol not supported") +// ErrNotSupported is the error returned when the muxer doesn't support +// the protocols tried for the handshake. +type ErrNotSupported[T StringLike] struct { + + // Slice of protocols that were not supported by the muxer + Protos []T +} + +func (e ErrNotSupported[T]) Error() string { + return fmt.Sprintf("protocols not supported: %v", e.Protos) +} + +func (e ErrNotSupported[T]) Is(target error) bool { + _, ok := target.(ErrNotSupported[T]) + return ok +} // ErrNoProtocols is the error returned when the no protocols have been // specified. @@ -31,7 +44,7 @@ const ( // to inform the muxer of the protocol that will be used to communicate // on this ReadWriteCloser. It returns an error if, for example, // the muxer does not know how to handle this protocol. -func SelectProtoOrFail(proto string, rwc io.ReadWriteCloser) (err error) { +func SelectProtoOrFail[T StringLike](proto T, rwc io.ReadWriteCloser) (err error) { defer func() { if rerr := recover(); rerr != nil { fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack()) @@ -66,7 +79,7 @@ func SelectProtoOrFail(proto string, rwc io.ReadWriteCloser) (err error) { // SelectOneOf will perform handshakes with the protocols on the given slice // until it finds one which is supported by the muxer. -func SelectOneOf(protos []string, rwc io.ReadWriteCloser) (proto string, err error) { +func SelectOneOf[T StringLike](protos []T, rwc io.ReadWriteCloser) (proto T, err error) { defer func() { if rerr := recover(); rerr != nil { fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack()) @@ -83,21 +96,25 @@ func SelectOneOf(protos []string, rwc io.ReadWriteCloser) (proto string, err err // can continue negotiating the rest of the protocols normally. // // This saves us a round trip. 
- switch err := SelectProtoOrFail(protos[0], rwc); err { + switch err := SelectProtoOrFail(protos[0], rwc); err.(type) { case nil: return protos[0], nil - case ErrNotSupported: // try others + case ErrNotSupported[T]: // try others default: return "", err } - return selectProtosOrFail(protos[1:], rwc) + proto, err = selectProtosOrFail(protos[1:], rwc) + if _, ok := err.(ErrNotSupported[T]); ok { + return "", ErrNotSupported[T]{protos} + } + return proto, err } const simOpenProtocol = "/libp2p/simultaneous-connect" // SelectWithSimopenOrFail performs protocol negotiation with the simultaneous open extension. // The returned boolean indicator will be true if we should act as a server. -func SelectWithSimopenOrFail(protos []string, rwc io.ReadWriteCloser) (proto string, isServer bool, err error) { +func SelectWithSimopenOrFail[T StringLike](protos []T, rwc io.ReadWriteCloser) (proto T, isServer bool, err error) { defer func() { if rerr := recover(); rerr != nil { fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack()) @@ -125,7 +142,7 @@ func SelectWithSimopenOrFail(protos []string, rwc io.ReadWriteCloser) (proto str return "", false, err } - tok, err := ReadNextToken(rwc) + tok, err := ReadNextToken[T](rwc) if err != nil { return "", false, err } @@ -146,13 +163,13 @@ func SelectWithSimopenOrFail(protos []string, rwc io.ReadWriteCloser) (proto str } return proto, false, nil default: - return "", false, errors.New("unexpected response: " + tok) + return "", false, fmt.Errorf("unexpected response: %s", tok) } } -func clientOpen(protos []string, rwc io.ReadWriteCloser) (string, error) { +func clientOpen[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) { // check to see if we selected the pipelined protocol - tok, err := ReadNextToken(rwc) + tok, err := ReadNextToken[T](rwc) if err != nil { return "", err } @@ -161,27 +178,31 @@ func clientOpen(protos []string, rwc io.ReadWriteCloser) (string, error) { case protos[0]: return tok, nil case "na": 
- return selectProtosOrFail(protos[1:], rwc) + proto, err := selectProtosOrFail(protos[1:], rwc) + if _, ok := err.(ErrNotSupported[T]); ok { + return "", ErrNotSupported[T]{protos} + } + return proto, err default: - return "", errors.New("unexpected response: " + tok) + return "", fmt.Errorf("unexpected response: %s", tok) } } -func selectProtosOrFail(protos []string, rwc io.ReadWriteCloser) (string, error) { +func selectProtosOrFail[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) { for _, p := range protos { err := trySelect(p, rwc) - switch err { + switch err := err.(type) { case nil: return p, nil - case ErrNotSupported: + case ErrNotSupported[T]: default: return "", err } } - return "", ErrNotSupported + return "", ErrNotSupported[T]{protos} } -func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) { +func simOpen[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, bool, error) { randBytes := make([]byte, 8) _, err := rand.Read(randBytes) if err != nil { @@ -198,17 +219,17 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) { // skip exactly one protocol // see https://github.com/multiformats/go-multistream/pull/42#discussion_r558757135 - _, err = ReadNextToken(rwc) + _, err = ReadNextToken[T](rwc) if err != nil { return "", false, err } // read the tie breaker nonce - tok, err := ReadNextToken(rwc) + tok, err := ReadNextToken[T](rwc) if err != nil { return "", false, err } - if !strings.HasPrefix(tok, tieBreakerPrefix) { + if !strings.HasPrefix(string(tok), tieBreakerPrefix) { return "", false, errors.New("tie breaker nonce not sent with the correct prefix") } @@ -216,7 +237,7 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) { return "", false, err } - peerNonce, err := strconv.ParseUint(tok[len(tieBreakerPrefix):], 10, 64) + peerNonce, err := strconv.ParseUint(string(tok[len(tieBreakerPrefix):]), 10, 64) if err != nil { return "", false, err } @@ -228,7 +249,7 @@ 
func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) { } iamserver = peerNonce > myNonce - var proto string + var proto T if iamserver { proto, err = simOpenSelectServer(protos, rwc) } else { @@ -238,29 +259,28 @@ func simOpen(protos []string, rwc io.ReadWriteCloser) (string, bool, error) { return proto, iamserver, err } -func simOpenSelectServer(protos []string, rwc io.ReadWriteCloser) (string, error) { +func simOpenSelectServer[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) { werrCh := make(chan error, 1) go func() { err := delimWriteBuffered(rwc, []byte(responder)) werrCh <- err }() - tok, err := ReadNextToken(rwc) + tok, err := ReadNextToken[T](rwc) if err != nil { return "", err } if tok != initiator { - return "", errors.New("unexpected response: " + tok) + return "", fmt.Errorf("unexpected response: %s", tok) } if err = <-werrCh; err != nil { return "", err } - for { - tok, err = ReadNextToken(rwc) + tok, err = ReadNextToken[T](rwc) if err == io.EOF { - return "", ErrNotSupported + return "", ErrNotSupported[T]{protos} } if err != nil { @@ -286,19 +306,19 @@ func simOpenSelectServer(protos []string, rwc io.ReadWriteCloser) (string, error } -func simOpenSelectClient(protos []string, rwc io.ReadWriteCloser) (string, error) { +func simOpenSelectClient[T StringLike](protos []T, rwc io.ReadWriteCloser) (T, error) { werrCh := make(chan error, 1) go func() { err := delimWriteBuffered(rwc, []byte(initiator)) werrCh <- err }() - tok, err := ReadNextToken(rwc) + tok, err := ReadNextToken[T](rwc) if err != nil { return "", err } if tok != responder { - return "", errors.New("unexpected response: " + tok) + return "", fmt.Errorf("unexpected response: %s", tok) } if err = <-werrCh; err != nil { return "", err @@ -308,7 +328,7 @@ func simOpenSelectClient(protos []string, rwc io.ReadWriteCloser) (string, error } func readMultistreamHeader(r io.Reader) error { - tok, err := ReadNextToken(r) + tok, err := ReadNextToken[string](r) if err 
!= nil { return err } @@ -319,7 +339,7 @@ func readMultistreamHeader(r io.Reader) error { return nil } -func trySelect(proto string, rwc io.ReadWriteCloser) error { +func trySelect[T StringLike](proto T, rwc io.ReadWriteCloser) error { err := delimWriteBuffered(rwc, []byte(proto)) if err != nil { return err @@ -327,8 +347,8 @@ func trySelect(proto string, rwc io.ReadWriteCloser) error { return readProto(proto, rwc) } -func readProto(proto string, r io.Reader) error { - tok, err := ReadNextToken(r) +func readProto[T StringLike](proto T, r io.Reader) error { + tok, err := ReadNextToken[T](r) if err != nil { return err } @@ -337,8 +357,8 @@ func readProto(proto string, r io.Reader) error { case proto: return nil case "na": - return ErrNotSupported + return ErrNotSupported[T]{[]T{proto}} default: - return errors.New("unrecognized response: " + tok) + return fmt.Errorf("unrecognized response: %s", tok) } } diff --git a/vendor/github.com/multiformats/go-multistream/lazyClient.go b/vendor/github.com/multiformats/go-multistream/lazyClient.go index 76d79ffe..6145eafc 100644 --- a/vendor/github.com/multiformats/go-multistream/lazyClient.go +++ b/vendor/github.com/multiformats/go-multistream/lazyClient.go @@ -8,9 +8,9 @@ import ( // NewMSSelect returns a new Multistream which is able to perform // protocol selection with a MultistreamMuxer. -func NewMSSelect(c io.ReadWriteCloser, proto string) LazyConn { - return &lazyClientConn{ - protos: []string{ProtocolID, proto}, +func NewMSSelect[T StringLike](c io.ReadWriteCloser, proto T) LazyConn { + return &lazyClientConn[T]{ + protos: []T{ProtocolID, proto}, con: c, } } @@ -18,9 +18,9 @@ func NewMSSelect(c io.ReadWriteCloser, proto string) LazyConn { // NewMultistream returns a multistream for the given protocol. This will not // perform any protocol selection. If you are using a MultistreamMuxer, use // NewMSSelect. 
-func NewMultistream(c io.ReadWriteCloser, proto string) LazyConn { - return &lazyClientConn{ - protos: []string{proto}, +func NewMultistream[T StringLike](c io.ReadWriteCloser, proto T) LazyConn { + return &lazyClientConn[T]{ + protos: []T{proto}, con: c, } } @@ -31,7 +31,7 @@ func NewMultistream(c io.ReadWriteCloser, proto string) LazyConn { // It *does not* block writes waiting for the other end to respond. Instead, it // simply assumes the negotiation went successfully and starts writing data. // See: https://github.com/multiformats/go-multistream/issues/20 -type lazyClientConn struct { +type lazyClientConn[T StringLike] struct { // Used to ensure we only trigger the write half of the handshake once. rhandshakeOnce sync.Once rerr error @@ -41,7 +41,7 @@ type lazyClientConn struct { werr error // The sequence of protocols to negotiate. - protos []string + protos []T // The inner connection. con io.ReadWriteCloser @@ -53,7 +53,7 @@ type lazyClientConn struct { // half of the handshake and then waits for the read half to complete. // // It returns an error if the read half of the handshake fails. 
-func (l *lazyClientConn) Read(b []byte) (int, error) { +func (l *lazyClientConn[T]) Read(b []byte) (int, error) { l.rhandshakeOnce.Do(func() { go l.whandshakeOnce.Do(l.doWriteHandshake) l.doReadHandshake() @@ -68,17 +68,17 @@ func (l *lazyClientConn) Read(b []byte) (int, error) { return l.con.Read(b) } -func (l *lazyClientConn) doReadHandshake() { +func (l *lazyClientConn[T]) doReadHandshake() { for _, proto := range l.protos { // read protocol - tok, err := ReadNextToken(l.con) + tok, err := ReadNextToken[T](l.con) if err != nil { l.rerr = err return } if tok == "na" { - l.rerr = ErrNotSupported + l.rerr = ErrNotSupported[T]{[]T{proto}} return } if tok != proto { @@ -88,12 +88,12 @@ func (l *lazyClientConn) doReadHandshake() { } } -func (l *lazyClientConn) doWriteHandshake() { +func (l *lazyClientConn[T]) doWriteHandshake() { l.doWriteHandshakeWithData(nil) } // Perform the write handshake but *also* write some extra data. -func (l *lazyClientConn) doWriteHandshakeWithData(extra []byte) int { +func (l *lazyClientConn[T]) doWriteHandshakeWithData(extra []byte) int { buf := getWriter(l.con) defer putWriter(buf) @@ -122,7 +122,7 @@ func (l *lazyClientConn) doWriteHandshakeWithData(extra []byte) int { // // Write *also* ignores errors from the read half of the handshake (in case the // stream is actually write only). -func (l *lazyClientConn) Write(b []byte) (int, error) { +func (l *lazyClientConn[T]) Write(b []byte) (int, error) { n := 0 l.whandshakeOnce.Do(func() { go l.rhandshakeOnce.Do(l.doReadHandshake) @@ -137,7 +137,7 @@ func (l *lazyClientConn) Write(b []byte) (int, error) { // Close closes the underlying io.ReadWriteCloser // // This does not flush anything. 
-func (l *lazyClientConn) Close() error { +func (l *lazyClientConn[T]) Close() error { // As the client, we flush the handshake on close to cover an // interesting edge-case where the server only speaks a single protocol // and responds eagerly with that protocol before waiting for out @@ -151,7 +151,7 @@ func (l *lazyClientConn) Close() error { } // Flush sends the handshake. -func (l *lazyClientConn) Flush() error { +func (l *lazyClientConn[T]) Flush() error { l.whandshakeOnce.Do(func() { go l.rhandshakeOnce.Do(l.doReadHandshake) l.doWriteHandshake() diff --git a/vendor/github.com/multiformats/go-multistream/multistream.go b/vendor/github.com/multiformats/go-multistream/multistream.go index 9f3a1a4c..17e1ef79 100644 --- a/vendor/github.com/multiformats/go-multistream/multistream.go +++ b/vendor/github.com/multiformats/go-multistream/multistream.go @@ -7,10 +7,9 @@ import ( "bufio" "errors" "fmt" + "io" "os" "runtime/debug" - - "io" "sync" "github.com/multiformats/go-varint" @@ -29,29 +28,35 @@ var writerPool = sync.Pool{ }, } +// StringLike is an interface that supports all types with underlying type +// string +type StringLike interface { + ~string +} + // HandlerFunc is a user-provided function used by the MultistreamMuxer to // handle a protocol/stream. -type HandlerFunc = func(protocol string, rwc io.ReadWriteCloser) error +type HandlerFunc[T StringLike] func(protocol T, rwc io.ReadWriteCloser) error // Handler is a wrapper to HandlerFunc which attaches a name (protocol) and a // match function which can optionally be used to select a handler by other // means than the name. -type Handler struct { - MatchFunc func(string) bool - Handle HandlerFunc - AddName string +type Handler[T StringLike] struct { + MatchFunc func(T) bool + Handle HandlerFunc[T] + AddName T } // MultistreamMuxer is a muxer for multistream. Depending on the stream // protocol tag it will select the right handler and hand the stream off to it. 
-type MultistreamMuxer struct { +type MultistreamMuxer[T StringLike] struct { handlerlock sync.RWMutex - handlers []Handler + handlers []Handler[T] } // NewMultistreamMuxer creates a muxer. -func NewMultistreamMuxer() *MultistreamMuxer { - return new(MultistreamMuxer) +func NewMultistreamMuxer[T StringLike]() *MultistreamMuxer[T] { + return new(MultistreamMuxer[T]) } // LazyConn is the connection type returned by the lazy negotiation functions. @@ -111,26 +116,26 @@ func delimWrite(w io.Writer, mes []byte) error { return nil } -func fulltextMatch(s string) func(string) bool { - return func(a string) bool { +func fulltextMatch[T StringLike](s T) func(T) bool { + return func(a T) bool { return a == s } } // AddHandler attaches a new protocol handler to the muxer. -func (msm *MultistreamMuxer) AddHandler(protocol string, handler HandlerFunc) { +func (msm *MultistreamMuxer[T]) AddHandler(protocol T, handler HandlerFunc[T]) { msm.AddHandlerWithFunc(protocol, fulltextMatch(protocol), handler) } // AddHandlerWithFunc attaches a new protocol handler to the muxer with a match. // If the match function returns true for a given protocol tag, the protocol // will be selected even if the handler name and protocol tags are different. -func (msm *MultistreamMuxer) AddHandlerWithFunc(protocol string, match func(string) bool, handler HandlerFunc) { +func (msm *MultistreamMuxer[T]) AddHandlerWithFunc(protocol T, match func(T) bool, handler HandlerFunc[T]) { msm.handlerlock.Lock() defer msm.handlerlock.Unlock() msm.removeHandler(protocol) - msm.handlers = append(msm.handlers, Handler{ + msm.handlers = append(msm.handlers, Handler[T]{ MatchFunc: match, Handle: handler, AddName: protocol, @@ -138,14 +143,14 @@ func (msm *MultistreamMuxer) AddHandlerWithFunc(protocol string, match func(stri } // RemoveHandler removes the handler with the given name from the muxer. 
-func (msm *MultistreamMuxer) RemoveHandler(protocol string) { +func (msm *MultistreamMuxer[T]) RemoveHandler(protocol T) { msm.handlerlock.Lock() defer msm.handlerlock.Unlock() msm.removeHandler(protocol) } -func (msm *MultistreamMuxer) removeHandler(protocol string) { +func (msm *MultistreamMuxer[T]) removeHandler(protocol T) { for i, h := range msm.handlers { if h.AddName == protocol { msm.handlers = append(msm.handlers[:i], msm.handlers[i+1:]...) @@ -155,11 +160,11 @@ func (msm *MultistreamMuxer) removeHandler(protocol string) { } // Protocols returns the list of handler-names added to this this muxer. -func (msm *MultistreamMuxer) Protocols() []string { +func (msm *MultistreamMuxer[T]) Protocols() []T { msm.handlerlock.RLock() defer msm.handlerlock.RUnlock() - var out []string + var out []T for _, h := range msm.handlers { out = append(out, h.AddName) } @@ -171,7 +176,7 @@ func (msm *MultistreamMuxer) Protocols() []string { // fails because of a ProtocolID mismatch. var ErrIncorrectVersion = errors.New("client connected with incorrect version") -func (msm *MultistreamMuxer) findHandler(proto string) *Handler { +func (msm *MultistreamMuxer[T]) findHandler(proto T) *Handler[T] { msm.handlerlock.RLock() defer msm.handlerlock.RUnlock() @@ -184,19 +189,9 @@ func (msm *MultistreamMuxer) findHandler(proto string) *Handler { return nil } -// NegotiateLazy performs protocol selection and returns -// a multistream, the protocol used, the handler and an error. It is lazy -// because the write-handshake is performed on a subroutine, allowing this -// to return before that handshake is completed. -// Deprecated: use Negotiate instead. 
-func (msm *MultistreamMuxer) NegotiateLazy(rwc io.ReadWriteCloser) (rwc_ io.ReadWriteCloser, proto string, handler HandlerFunc, err error) { - proto, handler, err = msm.Negotiate(rwc) - return rwc, proto, handler, err -} - // Negotiate performs protocol selection and returns the protocol name and // the matching handler function for it (or an error). -func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (proto string, handler HandlerFunc, err error) { +func (msm *MultistreamMuxer[T]) Negotiate(rwc io.ReadWriteCloser) (proto T, handler HandlerFunc[T], err error) { defer func() { if rerr := recover(); rerr != nil { fmt.Fprintf(os.Stderr, "caught panic: %s\n%s\n", rerr, debug.Stack()) @@ -209,8 +204,7 @@ func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (proto string, ha // other side has closed this rwc for writing. They may have sent us a // message and closed. Future writers will get an error anyways. _ = delimWriteBuffered(rwc, []byte(ProtocolID)) - - line, err := ReadNextToken(rwc) + line, err := ReadNextToken[T](rwc) if err != nil { return "", nil, err } @@ -223,7 +217,7 @@ func (msm *MultistreamMuxer) Negotiate(rwc io.ReadWriteCloser) (proto string, ha loop: for { // Now read and respond to commands until they send a valid protocol id - tok, err := ReadNextToken(rwc) + tok, err := ReadNextToken[T](rwc) if err != nil { return "", nil, err } @@ -250,7 +244,7 @@ loop: // Handle performs protocol negotiation on a ReadWriteCloser // (i.e. a connection). It will find a matching handler for the // incoming protocol and pass the ReadWriteCloser to it. -func (msm *MultistreamMuxer) Handle(rwc io.ReadWriteCloser) error { +func (msm *MultistreamMuxer[T]) Handle(rwc io.ReadWriteCloser) error { p, h, err := msm.Negotiate(rwc) if err != nil { return err @@ -260,13 +254,13 @@ func (msm *MultistreamMuxer) Handle(rwc io.ReadWriteCloser) error { // ReadNextToken extracts a token from a Reader. 
It is used during // protocol negotiation and returns a string. -func ReadNextToken(r io.Reader) (string, error) { +func ReadNextToken[T StringLike](r io.Reader) (T, error) { tok, err := ReadNextTokenBytes(r) if err != nil { return "", err } - return string(tok), nil + return T(tok), nil } // ReadNextTokenBytes extracts a token from a Reader. It is used diff --git a/vendor/github.com/multiformats/go-multistream/multistream_fuzz.go b/vendor/github.com/multiformats/go-multistream/multistream_fuzz.go deleted file mode 100644 index 3d65ca60..00000000 --- a/vendor/github.com/multiformats/go-multistream/multistream_fuzz.go +++ /dev/null @@ -1,29 +0,0 @@ -//go:build gofuzz -// +build gofuzz - -package multistream - -import "bytes" - -type rwc struct { - *bytes.Reader -} - -func (*rwc) Write(b []byte) (int, error) { - return len(b), nil -} - -func (*rwc) Close() error { - return nil -} - -func Fuzz(b []byte) int { - readStream := bytes.NewReader(b) - input := &rwc{readStream} - - mux := NewMultistreamMuxer() - mux.AddHandler("/a", nil) - mux.AddHandler("/b", nil) - _ = mux.Handle(input) - return 1 -} diff --git a/vendor/github.com/multiformats/go-multistream/version.json b/vendor/github.com/multiformats/go-multistream/version.json index 1f94dbba..26a7d478 100644 --- a/vendor/github.com/multiformats/go-multistream/version.json +++ b/vendor/github.com/multiformats/go-multistream/version.json @@ -1,3 +1,3 @@ { - "version": "v0.3.3" + "version": "v0.4.1" } diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index ece35d17..743555dd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -120,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri } func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { - out := fmt.Sprintf(f.style(format), 
args...) + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) + } if indentation == 0 && maxWidth == 0 { return out diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go index 496ec4a2..86da7340 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -25,7 +25,16 @@ func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite return suite } - args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./") + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) if err != nil { suite.State = TestSuiteStateFailedToCompile suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 7a27220c..56b7be75 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -12,6 +12,7 @@ import ( "io" "runtime" "strings" + "sync" "time" "github.com/onsi/ginkgo/v2/formatter" @@ -23,7 +24,7 @@ type DefaultReporter struct { writer io.Writer // managing the emission stream - lastChar string + lastCharWasNewline bool lastEmissionWasDelimiter bool // rendering @@ -32,6 +33,7 @@ type DefaultReporter struct { formatter formatter.Formatter 
runningInParallel bool + lock *sync.Mutex } func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { @@ -46,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep conf: conf, writer: writer, - lastChar: "\n", + lastCharWasNewline: true, lastEmissionWasDelimiter: false, specDenoter: "•", retryDenoter: "↺", formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, } if runtime.GOOS == "windows" { reporter.specDenoter = "+" @@ -528,7 +531,7 @@ func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { } func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { - r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) if representation := entry.StringRepresentation(); representation != "" { r.emitBlock(r.fi(indent+1, representation)) } @@ -619,31 +622,37 @@ func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { /* Emitting to the writer */ func (r *DefaultReporter) emit(s string) { - if len(s) > 0 { - r.lastChar = s[len(s)-1:] - r.lastEmissionWasDelimiter = false - r.writer.Write([]byte(s)) - } + r._emit(s, false, false) } func (r *DefaultReporter) emitBlock(s string) { - if len(s) > 0 { - if r.lastChar != "\n" { - r.emit("\n") - } - r.emit(s) - if r.lastChar != "\n" { - r.emit("\n") - } - } + r._emit(s, true, false) } func (r *DefaultReporter) emitDelimiter(indent uint) { - if r.lastEmissionWasDelimiter { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) +} + +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { + return + } + r.lock.Lock() + 
defer r.lock.Unlock() + if isDelimiter && r.lastEmissionWasDelimiter { return } - r.emitBlock(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30))) - r.lastEmissionWasDelimiter = true + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter } /* Rendering text */ diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index fb87e24d..ca98609d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool } type JUnitTestSuites struct { @@ -177,6 +180,9 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit }, } for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } name := fmt.Sprintf("[%s]", spec.LeafNodeType) if config.OmitLeafNodeType { name = "" diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index 12910918..9cd57681 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -7,6 +7,7 @@ import ( "runtime" "runtime/debug" "strings" + "sync" ) type CodeLocation struct { @@ -38,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string { return lines[codeLocation.LineNumber-1] } +type codeLocationLocator struct { + pcs 
map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if !c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var clLocator = &codeLocationLocator{ + pcs: map[uintptr]bool{}, + helpers: map[string]bool{}, + lock: &sync.Mutex{}, +} + +// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. 
+func MarkAsHelper(optionalSkip ...int) { + skip := 1 + if len(optionalSkip) > 0 { + skip += optionalSkip[0] + } + pc, _, _, ok := runtime.Caller(skip) + if ok { + clLocator.addHelper(pc) + } +} + func NewCustomCodeLocation(message string) CodeLocation { return CodeLocation{ CustomMessage: message, @@ -45,14 +113,13 @@ func NewCustomCodeLocation(message string) CodeLocation { } func NewCodeLocation(skip int) CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - return CodeLocation{FileName: file, LineNumber: line} + return clLocator.getCodeLocation(skip + 1) } func NewCodeLocationWithStackTrace(skip int) CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip+1) - return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} + cl := clLocator.getCodeLocation(skip + 1) + cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) + return cl } // PruneStack removes references to functions that are internal to Ginkgo diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 4ec636eb..1014c7b4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -8,6 +8,7 @@ package types import ( "flag" "os" + "path/filepath" "runtime" "strconv" "strings" @@ -30,6 +31,7 @@ type SuiteConfig struct { PollProgressAfter time.Duration PollProgressInterval time.Duration Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually OutputInterceptorMode string SourceRoots []string GracePeriod time.Duration @@ -599,13 +601,29 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func 
GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { goFlagsConfig.Cover = true } + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + args := []string{"test", "-c", "-o", destination, packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 0403f9e6..b0d3b651 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) { } } +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + func ParseLabelFilter(input string) (LabelFilter, error) { if DEBUG_LABEL_FILTER_PARSING { fmt.Println("\n==============") fmt.Println("Input: ", input) fmt.Print("Tokens: ") } + if input == "" { + return func(_ []string) bool { return true }, nil + } nextToken := 
tokenize(input) root := &treeNode{token: lfTokenRoot} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index ef61d5e7..8e7f7404 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.7.0" +const VERSION = "2.9.2" diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841..f4fc8845 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -115,32 +115,28 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. 
+ for key, fam := range d.fams { + *v = *fam + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643..ac248278 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -142,9 +142,13 @@ func (p *TextParser) reset(in io.Reader) { func (p *TextParser) startOfLine() stateFn { p.lineCount++ if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if p.err == io.EOF { + p.err = nil + } return nil } switch p.currentByte { diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index c909b8aa..5727452c 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ b/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. 
+var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": return 0, errors.New("empty duration string") } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. + for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. 
+ return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a..9eb44041 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -16,20 +16,12 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - // ZeroSample is the pseudo zero-value of Sample used to signal a // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, // and metric nil. 
Note that the natural zero value of Sample has a timestamp @@ -38,82 +30,14 @@ var ( ZeroSample = Sample{Timestamp: Earliest} ) -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. 
-func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +53,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +74,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. 
func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +97,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +123,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +175,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. 
type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 00000000..0f615a70 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "strconv" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. 
+func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 00000000..54bb038c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,178 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if 
lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. 
+ Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, fmt.Errorf("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return fmt.Errorf("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 00000000..726c50ee --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/vendor/github.com/quic-go/qpack/.codecov.yml b/vendor/github.com/quic-go/qpack/.codecov.yml new file mode 100644 index 00000000..00064af3 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/.codecov.yml @@ -0,0 +1,7 @@ +coverage: + round: nearest + status: + project: + default: + threshold: 1 + patch: false diff --git a/vendor/github.com/quic-go/qpack/.gitignore b/vendor/github.com/quic-go/qpack/.gitignore new file mode 100644 index 00000000..66c189a0 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/.gitignore @@ -0,0 +1,6 @@ +fuzzing/*.zip +fuzzing/coverprofile +fuzzing/crashers +fuzzing/sonarprofile +fuzzing/suppressions 
+fuzzing/corpus/ diff --git a/vendor/github.com/quic-go/qpack/.gitmodules b/vendor/github.com/quic-go/qpack/.gitmodules new file mode 100644 index 00000000..5ac16f08 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/.gitmodules @@ -0,0 +1,3 @@ +[submodule "integrationtests/interop/qifs"] + path = integrationtests/interop/qifs + url = https://github.com/qpackers/qifs.git diff --git a/vendor/github.com/quic-go/qpack/.golangci.yml b/vendor/github.com/quic-go/qpack/.golangci.yml new file mode 100644 index 00000000..4a91adc7 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/.golangci.yml @@ -0,0 +1,27 @@ +run: +linters-settings: +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - exhaustive + - exportloopref + - goconst + - gofmt # redundant, since gofmt *should* be a no-op after gofumpt + - gofumpt + - goimports + - gosimple + - ineffassign + - misspell + - prealloc + - scopelint + - staticcheck + - stylecheck + - structcheck + - unconvert + - unparam + - unused + - varcheck + - vet + diff --git a/vendor/github.com/quic-go/qpack/LICENSE.md b/vendor/github.com/quic-go/qpack/LICENSE.md new file mode 100644 index 00000000..1ac5a2d9 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/LICENSE.md @@ -0,0 +1,7 @@ +Copyright 2019 Marten Seemann + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/quic-go/qpack/README.md b/vendor/github.com/quic-go/qpack/README.md new file mode 100644 index 00000000..6ba4bad4 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/README.md @@ -0,0 +1,20 @@ +# QPACK + +[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/marten-seemann/qpack) +[![Code Coverage](https://img.shields.io/codecov/c/github/marten-seemann/qpack/master.svg?style=flat-square)](https://codecov.io/gh/marten-seemann/qpack) + +This is a minimal QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)) implementation in Go. It is minimal in the sense that it doesn't use the dynamic table at all, but just the static table and (Huffman encoded) string literals. Wherever possible, it reuses code from the [HPACK implementation in the Go standard library](https://github.com/golang/net/tree/master/http2/hpack). + +It should be able to interoperate with other QPACK implemetations (both encoders and decoders), however it won't achieve a high compression efficiency. 
+ +## Running the interop tests + +Install the [QPACK interop files](https://github.com/qpackers/qifs/) by running +```bash +git submodule update --init --recursive +``` + +Then run the tests: +```bash +ginkgo -r integrationtests +``` diff --git a/vendor/github.com/quic-go/qpack/decoder.go b/vendor/github.com/quic-go/qpack/decoder.go new file mode 100644 index 00000000..c9001941 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/decoder.go @@ -0,0 +1,271 @@ +package qpack + +import ( + "bytes" + "errors" + "fmt" + "sync" + + "golang.org/x/net/http2/hpack" +) + +// A decodingError is something the spec defines as a decoding error. +type decodingError struct { + err error +} + +func (de decodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.err) +} + +// An invalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type invalidIndexError int + +func (e invalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +var errNoDynamicTable = decodingError{errors.New("no dynamic table")} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + mutex sync.Mutex + + emitFunc func(f HeaderField) + + readRequiredInsertCount bool + readDeltaBase bool + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. + buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. 
+ saveBuf bytes.Buffer +} + +// NewDecoder returns a new decoder +// The emitFunc will be called for each valid field parsed, +// in the same goroutine as calls to Write, before Write returns. +func NewDecoder(emitFunc func(f HeaderField)) *Decoder { + return &Decoder{emitFunc: emitFunc} +} + +func (d *Decoder) Write(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + d.mutex.Lock() + n, err := d.writeLocked(p) + d.mutex.Unlock() + return n, err +} + +func (d *Decoder) writeLocked(p []byte) (int, error) { + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + if err := d.decode(); err != nil { + if err != errNeedMore { + return 0, err + } + // TODO: limit the size of the buffer + d.saveBuf.Write(d.buf) + } + return len(p), nil +} + +// DecodeFull decodes an entire block. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + if len(p) == 0 { + return []HeaderField{}, nil + } + + d.mutex.Lock() + defer d.mutex.Unlock() + + saveFunc := d.emitFunc + defer func() { d.emitFunc = saveFunc }() + + var hf []HeaderField + d.emitFunc = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.writeLocked(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +// Close declares that the decoding is complete and resets the Decoder +// to be reused again for a new header block. If there is any remaining +// data in the decoder's buffer, Close returns an error. 
+func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return decodingError{errors.New("truncated headers")} + } + d.readRequiredInsertCount = false + d.readDeltaBase = false + return nil +} + +func (d *Decoder) decode() error { + if !d.readRequiredInsertCount { + requiredInsertCount, rest, err := readVarInt(8, d.buf) + if err != nil { + return err + } + d.readRequiredInsertCount = true + if requiredInsertCount != 0 { + return decodingError{errors.New("expected Required Insert Count to be zero")} + } + d.buf = rest + } + if !d.readDeltaBase { + base, rest, err := readVarInt(7, d.buf) + if err != nil { + return err + } + d.readDeltaBase = true + if base != 0 { + return decodingError{errors.New("expected Base to be zero")} + } + d.buf = rest + } + if len(d.buf) == 0 { + return errNeedMore + } + + for len(d.buf) > 0 { + b := d.buf[0] + var err error + switch { + case b&0x80 > 0: // 1xxxxxxx + err = d.parseIndexedHeaderField() + case b&0xc0 == 0x40: // 01xxxxxx + err = d.parseLiteralHeaderField() + case b&0xe0 == 0x20: // 001xxxxx + err = d.parseLiteralHeaderFieldWithoutNameReference() + default: + err = fmt.Errorf("unexpected type byte: %#x", b) + } + if err != nil { + return err + } + } + return nil +} + +func (d *Decoder) parseIndexedHeaderField() error { + buf := d.buf + if buf[0]&0x40 == 0 { + return errNoDynamicTable + } + index, buf, err := readVarInt(6, buf) + if err != nil { + return err + } + hf, ok := d.at(index) + if !ok { + return decodingError{invalidIndexError(index)} + } + d.emitFunc(hf) + d.buf = buf + return nil +} + +func (d *Decoder) parseLiteralHeaderField() error { + buf := d.buf + if buf[0]&0x20 > 0 || buf[0]&0x10 == 0 { + return errNoDynamicTable + } + index, buf, err := readVarInt(4, buf) + if err != nil { + return err + } + hf, ok := d.at(index) + if !ok { + return decodingError{invalidIndexError(index)} + } + if len(buf) == 0 { + return errNeedMore + } + usesHuffman := buf[0]&0x80 > 0 + val, buf, err := 
d.readString(buf, 7, usesHuffman) + if err != nil { + return err + } + hf.Value = val + d.emitFunc(hf) + d.buf = buf + return nil +} + +func (d *Decoder) parseLiteralHeaderFieldWithoutNameReference() error { + buf := d.buf + usesHuffmanForName := buf[0]&0x8 > 0 + name, buf, err := d.readString(buf, 3, usesHuffmanForName) + if err != nil { + return err + } + if len(buf) == 0 { + return errNeedMore + } + usesHuffmanForVal := buf[0]&0x80 > 0 + val, buf, err := d.readString(buf, 7, usesHuffmanForVal) + if err != nil { + return err + } + d.emitFunc(HeaderField{Name: name, Value: val}) + d.buf = buf + return nil +} + +func (d *Decoder) readString(buf []byte, n uint8, usesHuffman bool) (string, []byte, error) { + l, buf, err := readVarInt(n, buf) + if err != nil { + return "", nil, err + } + if uint64(len(buf)) < l { + return "", nil, errNeedMore + } + var val string + if usesHuffman { + var err error + val, err = hpack.HuffmanDecodeToString(buf[:l]) + if err != nil { + return "", nil, err + } + } else { + val = string(buf[:l]) + } + buf = buf[l:] + return val, buf, nil +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + if i >= uint64(len(staticTableEntries)) { + return + } + return staticTableEntries[i], true +} diff --git a/vendor/github.com/quic-go/qpack/encoder.go b/vendor/github.com/quic-go/qpack/encoder.go new file mode 100644 index 00000000..ad695353 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/encoder.go @@ -0,0 +1,95 @@ +package qpack + +import ( + "io" + + "golang.org/x/net/http2/hpack" +) + +// An Encoder performs QPACK encoding. +type Encoder struct { + wrotePrefix bool + + w io.Writer + buf []byte +} + +// NewEncoder returns a new Encoder which performs QPACK encoding. An +// encoded data is written to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// WriteField encodes f into a single Write to e's underlying Writer. +// This function may also produce bytes for the Header Block Prefix +// if necessary. 
If produced, it is done before encoding f. +func (e *Encoder) WriteField(f HeaderField) error { + // write the Header Block Prefix + if !e.wrotePrefix { + e.buf = appendVarInt(e.buf, 8, 0) + e.buf = appendVarInt(e.buf, 7, 0) + e.wrotePrefix = true + } + + idxAndVals, nameFound := encoderMap[f.Name] + if nameFound { + if idxAndVals.values == nil { + if len(f.Value) == 0 { + e.writeIndexedField(idxAndVals.idx) + } else { + e.writeLiteralFieldWithNameReference(&f, idxAndVals.idx) + } + } else { + valIdx, valueFound := idxAndVals.values[f.Value] + if valueFound { + e.writeIndexedField(valIdx) + } else { + e.writeLiteralFieldWithNameReference(&f, idxAndVals.idx) + } + } + } else { + e.writeLiteralFieldWithoutNameReference(f) + } + + _, err := e.w.Write(e.buf) + e.buf = e.buf[:0] + return err +} + +// Close declares that the encoding is complete and resets the Encoder +// to be reused again for a new header block. +func (e *Encoder) Close() error { + e.wrotePrefix = false + return nil +} + +func (e *Encoder) writeLiteralFieldWithoutNameReference(f HeaderField) { + offset := len(e.buf) + e.buf = appendVarInt(e.buf, 3, hpack.HuffmanEncodeLength(f.Name)) + e.buf[offset] ^= 0x20 ^ 0x8 + e.buf = hpack.AppendHuffmanString(e.buf, f.Name) + offset = len(e.buf) + e.buf = appendVarInt(e.buf, 7, hpack.HuffmanEncodeLength(f.Value)) + e.buf[offset] ^= 0x80 + e.buf = hpack.AppendHuffmanString(e.buf, f.Value) +} + +// Encodes a header field whose name is present in one of the tables. +func (e *Encoder) writeLiteralFieldWithNameReference(f *HeaderField, id uint8) { + offset := len(e.buf) + e.buf = appendVarInt(e.buf, 4, uint64(id)) + // Set the 01NTxxxx pattern, forcing N to 0 and T to 1 + e.buf[offset] ^= 0x50 + offset = len(e.buf) + e.buf = appendVarInt(e.buf, 7, hpack.HuffmanEncodeLength(f.Value)) + e.buf[offset] ^= 0x80 + e.buf = hpack.AppendHuffmanString(e.buf, f.Value) +} + +// Encodes an indexed field, meaning it's entirely defined in one of the tables. 
+func (e *Encoder) writeIndexedField(id uint8) { + offset := len(e.buf) + e.buf = appendVarInt(e.buf, 6, uint64(id)) + // Set the 1Txxxxxx pattern, forcing T to 1 + e.buf[offset] ^= 0xc0 +} diff --git a/vendor/github.com/quic-go/qpack/header_field.go b/vendor/github.com/quic-go/qpack/header_field.go new file mode 100644 index 00000000..4c043a99 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/header_field.go @@ -0,0 +1,16 @@ +package qpack + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name string + Value string +} + +// IsPseudo reports whether the header field is an HTTP3 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} diff --git a/vendor/github.com/quic-go/qpack/static_table.go b/vendor/github.com/quic-go/qpack/static_table.go new file mode 100644 index 00000000..73c365e1 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/static_table.go @@ -0,0 +1,255 @@ +package qpack + +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":path", Value: "/"}, + {Name: "age", Value: "0"}, + {Name: "content-disposition"}, + {Name: "content-length", Value: "0"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "referer"}, + {Name: "set-cookie"}, + {Name: ":method", Value: "CONNECT"}, + {Name: ":method", Value: "DELETE"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "HEAD"}, + {Name: ":method", Value: "OPTIONS"}, + {Name: ":method", Value: "POST"}, + {Name: ":method", Value: "PUT"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "103"}, + {Name: ":status", 
Value: "200"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "503"}, + {Name: "accept", Value: "*/*"}, + {Name: "accept", Value: "application/dns-message"}, + {Name: "accept-encoding", Value: "gzip, deflate, br"}, + {Name: "accept-ranges", Value: "bytes"}, + {Name: "access-control-allow-headers", Value: "cache-control"}, + {Name: "access-control-allow-headers", Value: "content-type"}, + {Name: "access-control-allow-origin", Value: "*"}, + {Name: "cache-control", Value: "max-age=0"}, + {Name: "cache-control", Value: "max-age=2592000"}, + {Name: "cache-control", Value: "max-age=604800"}, + {Name: "cache-control", Value: "no-cache"}, + {Name: "cache-control", Value: "no-store"}, + {Name: "cache-control", Value: "public, max-age=31536000"}, + {Name: "content-encoding", Value: "br"}, + {Name: "content-encoding", Value: "gzip"}, + {Name: "content-type", Value: "application/dns-message"}, + {Name: "content-type", Value: "application/javascript"}, + {Name: "content-type", Value: "application/json"}, + {Name: "content-type", Value: "application/x-www-form-urlencoded"}, + {Name: "content-type", Value: "image/gif"}, + {Name: "content-type", Value: "image/jpeg"}, + {Name: "content-type", Value: "image/png"}, + {Name: "content-type", Value: "text/css"}, + {Name: "content-type", Value: "text/html; charset=utf-8"}, + {Name: "content-type", Value: "text/plain"}, + {Name: "content-type", Value: "text/plain;charset=utf-8"}, + {Name: "range", Value: "bytes=0-"}, + {Name: "strict-transport-security", Value: "max-age=31536000"}, + {Name: "strict-transport-security", Value: "max-age=31536000; includesubdomains"}, + {Name: "strict-transport-security", Value: "max-age=31536000; includesubdomains; preload"}, + {Name: "vary", Value: "accept-encoding"}, + {Name: "vary", Value: "origin"}, + {Name: "x-content-type-options", Value: "nosniff"}, + {Name: "x-xss-protection", Value: "1; mode=block"}, + {Name: ":status", Value: "100"}, + {Name: 
":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "302"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "403"}, + {Name: ":status", Value: "421"}, + {Name: ":status", Value: "425"}, + {Name: ":status", Value: "500"}, + {Name: "accept-language"}, + {Name: "access-control-allow-credentials", Value: "FALSE"}, + {Name: "access-control-allow-credentials", Value: "TRUE"}, + {Name: "access-control-allow-headers", Value: "*"}, + {Name: "access-control-allow-methods", Value: "get"}, + {Name: "access-control-allow-methods", Value: "get, post, options"}, + {Name: "access-control-allow-methods", Value: "options"}, + {Name: "access-control-expose-headers", Value: "content-length"}, + {Name: "access-control-request-headers", Value: "content-type"}, + {Name: "access-control-request-method", Value: "get"}, + {Name: "access-control-request-method", Value: "post"}, + {Name: "alt-svc", Value: "clear"}, + {Name: "authorization"}, + {Name: "content-security-policy", Value: "script-src 'none'; object-src 'none'; base-uri 'none'"}, + {Name: "early-data", Value: "1"}, + {Name: "expect-ct"}, + {Name: "forwarded"}, + {Name: "if-range"}, + {Name: "origin"}, + {Name: "purpose", Value: "prefetch"}, + {Name: "server"}, + {Name: "timing-allow-origin", Value: "*"}, + {Name: "upgrade-insecure-requests", Value: "1"}, + {Name: "user-agent"}, + {Name: "x-forwarded-for"}, + {Name: "x-frame-options", Value: "deny"}, + {Name: "x-frame-options", Value: "sameorigin"}, +} + +// Only needed for tests. +// use go:linkname to retrieve the static table. +// +//nolint:deadcode,unused +func getStaticTable() []HeaderField { + return staticTableEntries[:] +} + +type indexAndValues struct { + idx uint8 + values map[string]uint8 +} + +// A map of the header names from the static table to their index in the table. +// This is used by the encoder to quickly find if a header is in the static table +// and what value should be used to encode it. 
+// There's a second level of mapping for the headers that have some predefined +// values in the static table. +var encoderMap = map[string]indexAndValues{ + ":authority": {0, nil}, + ":path": {1, map[string]uint8{"/": 1}}, + "age": {2, map[string]uint8{"0": 2}}, + "content-disposition": {3, nil}, + "content-length": {4, map[string]uint8{"0": 4}}, + "cookie": {5, nil}, + "date": {6, nil}, + "etag": {7, nil}, + "if-modified-since": {8, nil}, + "if-none-match": {9, nil}, + "last-modified": {10, nil}, + "link": {11, nil}, + "location": {12, nil}, + "referer": {13, nil}, + "set-cookie": {14, nil}, + ":method": {15, map[string]uint8{ + "CONNECT": 15, + "DELETE": 16, + "GET": 17, + "HEAD": 18, + "OPTIONS": 19, + "POST": 20, + "PUT": 21, + }}, + ":scheme": {22, map[string]uint8{ + "http": 22, + "https": 23, + }}, + ":status": {24, map[string]uint8{ + "103": 24, + "200": 25, + "304": 26, + "404": 27, + "503": 28, + "100": 63, + "204": 64, + "206": 65, + "302": 66, + "400": 67, + "403": 68, + "421": 69, + "425": 70, + "500": 71, + }}, + "accept": {29, map[string]uint8{ + "*/*": 29, + "application/dns-message": 30, + }}, + "accept-encoding": {31, map[string]uint8{"gzip, deflate, br": 31}}, + "accept-ranges": {32, map[string]uint8{"bytes": 32}}, + "access-control-allow-headers": {33, map[string]uint8{ + "cache-control": 33, + "content-type": 34, + "*": 75, + }}, + "access-control-allow-origin": {35, map[string]uint8{"*": 35}}, + "cache-control": {36, map[string]uint8{ + "max-age=0": 36, + "max-age=2592000": 37, + "max-age=604800": 38, + "no-cache": 39, + "no-store": 40, + "public, max-age=31536000": 41, + }}, + "content-encoding": {42, map[string]uint8{ + "br": 42, + "gzip": 43, + }}, + "content-type": {44, map[string]uint8{ + "application/dns-message": 44, + "application/javascript": 45, + "application/json": 46, + "application/x-www-form-urlencoded": 47, + "image/gif": 48, + "image/jpeg": 49, + "image/png": 50, + "text/css": 51, + "text/html; charset=utf-8": 52, + 
"text/plain": 53, + "text/plain;charset=utf-8": 54, + }}, + "range": {55, map[string]uint8{"bytes=0-": 55}}, + "strict-transport-security": {56, map[string]uint8{ + "max-age=31536000": 56, + "max-age=31536000; includesubdomains": 57, + "max-age=31536000; includesubdomains; preload": 58, + }}, + "vary": {59, map[string]uint8{ + "accept-encoding": 59, + "origin": 60, + }}, + "x-content-type-options": {61, map[string]uint8{"nosniff": 61}}, + "x-xss-protection": {62, map[string]uint8{"1; mode=block": 62}}, + // ":status" is duplicated and takes index 63 to 71 + "accept-language": {72, nil}, + "access-control-allow-credentials": {73, map[string]uint8{ + "FALSE": 73, + "TRUE": 74, + }}, + // "access-control-allow-headers" is duplicated and takes index 75 + "access-control-allow-methods": {76, map[string]uint8{ + "get": 76, + "get, post, options": 77, + "options": 78, + }}, + "access-control-expose-headers": {79, map[string]uint8{"content-length": 79}}, + "access-control-request-headers": {80, map[string]uint8{"content-type": 80}}, + "access-control-request-method": {81, map[string]uint8{ + "get": 81, + "post": 82, + }}, + "alt-svc": {83, map[string]uint8{"clear": 83}}, + "authorization": {84, nil}, + "content-security-policy": {85, map[string]uint8{ + "script-src 'none'; object-src 'none'; base-uri 'none'": 85, + }}, + "early-data": {86, map[string]uint8{"1": 86}}, + "expect-ct": {87, nil}, + "forwarded": {88, nil}, + "if-range": {89, nil}, + "origin": {90, nil}, + "purpose": {91, map[string]uint8{"prefetch": 91}}, + "server": {92, nil}, + "timing-allow-origin": {93, map[string]uint8{"*": 93}}, + "upgrade-insecure-requests": {94, map[string]uint8{"1": 94}}, + "user-agent": {95, nil}, + "x-forwarded-for": {96, nil}, + "x-frame-options": {97, map[string]uint8{ + "deny": 97, + "sameorigin": 98, + }}, +} diff --git a/vendor/github.com/quic-go/qpack/tools.go b/vendor/github.com/quic-go/qpack/tools.go new file mode 100644 index 00000000..8f71eea2 --- /dev/null +++ 
b/vendor/github.com/quic-go/qpack/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package qpack + +import _ "github.com/onsi/ginkgo/v2/ginkgo" diff --git a/vendor/github.com/quic-go/qpack/varint.go b/vendor/github.com/quic-go/qpack/varint.go new file mode 100644 index 00000000..28d71122 --- /dev/null +++ b/vendor/github.com/quic-go/qpack/varint.go @@ -0,0 +1,66 @@ +package qpack + +// copied from the Go standard library HPACK implementation + +import "errors" + +var errVarintOverflow = errors.New("varint integer overflow") + +// appendVarInt appends i, as encoded in variable integer form using n +// bit prefix, to dst and returns the extended buffer. +// +// See +// http://http2.github.io/http2-spec/compression.html#integer.representation +func appendVarInt(dst []byte, n byte, i uint64) []byte { + k := uint64((1 << n) - 1) + if i < k { + return append(dst, byte(i)) + } + dst = append(dst, byte(k)) + i -= k + for ; i >= 128; i >>= 7 { + dst = append(dst, byte(0x80|(i&0x7f))) + } + return append(dst, byte(i)) +} + +// readVarInt reads an unsigned variable length integer off the +// beginning of p. n is the parameter as described in +// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. +// +// n must always be between 1 and 8. +// +// The returned remain buffer is either a smaller suffix of p, or err != nil. +// The error is errNeedMore if p doesn't contain a complete integer. +func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { + if n < 1 || n > 8 { + panic("bad n") + } + if len(p) == 0 { + return 0, p, errNeedMore + } + i = uint64(p[0]) + if n < 8 { + i &= (1 << uint64(n)) - 1 + } + if i < (1< 0 { + b := p[0] + p = p[1:] + i += uint64(b&127) << m + if b&128 == 0 { + return i, p, nil + } + m += 7 + if m >= 63 { // TODO: proper overflow check. making this up. 
+ return 0, origP, errVarintOverflow + } + } + return 0, origP, errNeedMore +} diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/LICENSE b/vendor/github.com/quic-go/qtls-go1-19/LICENSE similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/LICENSE rename to vendor/github.com/quic-go/qtls-go1-19/LICENSE diff --git a/vendor/github.com/quic-go/qtls-go1-19/README.md b/vendor/github.com/quic-go/qtls-go1-19/README.md new file mode 100644 index 00000000..bf41f1c5 --- /dev/null +++ b/vendor/github.com/quic-go/qtls-go1-19/README.md @@ -0,0 +1,6 @@ +# qtls + +[![Go Reference](https://pkg.go.dev/badge/github.com/quic-go/qtls-go1-19.svg)](https://pkg.go.dev/github.com/quic-go/qtls-go1-19) +[![.github/workflows/go-test.yml](https://github.com/quic-go/qtls-go1-19/actions/workflows/go-test.yml/badge.svg)](https://github.com/quic-go/qtls-go1-19/actions/workflows/go-test.yml) + +This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go). 
diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/alert.go b/vendor/github.com/quic-go/qtls-go1-19/alert.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/alert.go rename to vendor/github.com/quic-go/qtls-go1-19/alert.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/auth.go b/vendor/github.com/quic-go/qtls-go1-19/auth.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/auth.go rename to vendor/github.com/quic-go/qtls-go1-19/auth.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/cipher_suites.go b/vendor/github.com/quic-go/qtls-go1-19/cipher_suites.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/cipher_suites.go rename to vendor/github.com/quic-go/qtls-go1-19/cipher_suites.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/common.go b/vendor/github.com/quic-go/qtls-go1-19/common.go similarity index 99% rename from vendor/github.com/marten-seemann/qtls-go1-19/common.go rename to vendor/github.com/quic-go/qtls-go1-19/common.go index 2484da05..63e391bf 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/common.go +++ b/vendor/github.com/quic-go/qtls-go1-19/common.go @@ -346,7 +346,7 @@ type clientSessionState struct { // SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which // are supported via this interface. 
// -//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/marten-seemann/qtls-go1-19 ClientSessionCache" +//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/quic-go/qtls-go1-19 ClientSessionCache" type ClientSessionCache = tls.ClientSessionCache // SignatureScheme is a tls.SignatureScheme @@ -1412,7 +1412,7 @@ func leafCertificate(c *Certificate) (*x509.Certificate, error) { } type handshakeMessage interface { - marshal() []byte + marshal() ([]byte, error) unmarshal([]byte) bool } diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/conn.go b/vendor/github.com/quic-go/qtls-go1-19/conn.go similarity index 97% rename from vendor/github.com/marten-seemann/qtls-go1-19/conn.go rename to vendor/github.com/quic-go/qtls-go1-19/conn.go index 5a17f7a1..19f24e95 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/conn.go +++ b/vendor/github.com/quic-go/qtls-go1-19/conn.go @@ -1041,25 +1041,46 @@ func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) { return n, nil } -// writeRecord writes a TLS record with the given type and payload to the -// connection and updates the record layer state. -func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) { +// writeHandshakeRecord writes a handshake message to the connection and updates +// the record layer state. If transcript is non-nil the marshalled message is +// written to it. 
+func (c *Conn) writeHandshakeRecord(msg handshakeMessage, transcript transcriptHash) (int, error) { + data, err := msg.marshal() + if err != nil { + return 0, err + } + + c.out.Lock() + defer c.out.Unlock() + + if transcript != nil { + transcript.Write(data) + } + if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil { - if typ == recordTypeChangeCipherSpec { - return len(data), nil - } return c.extraConfig.AlternativeRecordLayer.WriteRecord(data) } + return c.writeRecordLocked(recordTypeHandshake, data) +} + +// writeChangeCipherRecord writes a ChangeCipherSpec message to the connection and +// updates the record layer state. +func (c *Conn) writeChangeCipherRecord() error { + if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil { + return nil + } + c.out.Lock() defer c.out.Unlock() - - return c.writeRecordLocked(typ, data) + _, err := c.writeRecordLocked(recordTypeChangeCipherSpec, []byte{1}) + return err } // readHandshake reads the next handshake message from -// the record layer. -func (c *Conn) readHandshake() (any, error) { +// the record layer. If transcript is non-nil, the message +// is written to the passed transcriptHash. 
+func (c *Conn) readHandshake(transcript transcriptHash) (any, error) { var data []byte if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil { var err error @@ -1147,6 +1168,11 @@ func (c *Conn) readHandshake() (any, error) { if !m.unmarshal(data) { return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) } + + if transcript != nil { + transcript.Write(data) + } + return m, nil } @@ -1222,7 +1248,7 @@ func (c *Conn) handleRenegotiation() error { return errors.New("tls: internal error: unexpected renegotiation") } - msg, err := c.readHandshake() + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -1272,7 +1298,7 @@ func (c *Conn) handlePostHandshakeMessage() error { return c.handleRenegotiation() } - msg, err := c.readHandshake() + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -1308,7 +1334,11 @@ func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error { defer c.out.Unlock() msg := &keyUpdateMsg{} - _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal()) + msgBytes, err := msg.marshal() + if err != nil { + return err + } + _, err = c.writeRecordLocked(recordTypeHandshake, msgBytes) if err != nil { // Surface the error at the next write. 
c.out.setErrorLocked(err) diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/cpu.go b/vendor/github.com/quic-go/qtls-go1-19/cpu.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/cpu.go rename to vendor/github.com/quic-go/qtls-go1-19/cpu.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/cpu_other.go b/vendor/github.com/quic-go/qtls-go1-19/cpu_other.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/cpu_other.go rename to vendor/github.com/quic-go/qtls-go1-19/cpu_other.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_client.go similarity index 91% rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_client.go rename to vendor/github.com/quic-go/qtls-go1-19/handshake_client.go index d373b886..8d1fae01 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client.go +++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_client.go @@ -144,22 +144,13 @@ func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) { var params ecdheParameters if hello.supportedVersions[0] == VersionTLS13 { - var suites []uint16 - for _, suiteID := range configCipherSuites { - for _, suite := range cipherSuitesTLS13 { - if suite.id == suiteID { - suites = append(suites, suiteID) - } - } + if len(hello.supportedVersions) == 1 { + hello.cipherSuites = hello.cipherSuites[:0] } - if len(suites) > 0 { - hello.cipherSuites = suites + if hasAESGCMHardwareSupport { + hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...) } else { - if hasAESGCMHardwareSupport { - hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...) - } else { - hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...) - } + hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...) 
} curveID := config.curvePreferences()[0] @@ -196,7 +187,10 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { } c.serverName = hello.serverName - cacheKey, session, earlySecret, binderKey := c.loadSession(hello) + cacheKey, session, earlySecret, binderKey, err := c.loadSession(hello) + if err != nil { + return err + } if cacheKey != "" && session != nil { var deletedTicket bool if session.vers == VersionTLS13 && hello.earlyData && c.extraConfig != nil && c.extraConfig.Enable0RTT { @@ -206,11 +200,14 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { if suite := cipherSuiteTLS13ByID(session.cipherSuite); suite != nil { h := suite.hash.New() - h.Write(hello.marshal()) + helloBytes, err := hello.marshal() + if err != nil { + return err + } + h.Write(helloBytes) clientEarlySecret := suite.deriveSecret(earlySecret, "c e traffic", h) c.out.exportKey(Encryption0RTT, suite, clientEarlySecret) if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hello.random, clientEarlySecret); err != nil { - c.sendAlert(alertInternalError) return err } } @@ -230,11 +227,12 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { } } - if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil { + if _, err := c.writeHandshakeRecord(hello, nil); err != nil { return err } - msg, err := c.readHandshake() + // serverHelloMsg is not included in the transcript + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -327,9 +325,9 @@ func (c *Conn) decodeSessionState(session *clientSessionState) (uint32 /* max ea } func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, - session *clientSessionState, earlySecret, binderKey []byte) { + session *clientSessionState, earlySecret, binderKey []byte, err error) { if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil { - return "", nil, nil, nil + return "", nil, nil, nil, nil } hello.ticketSupported = true @@ -344,14 +342,14 @@ func (c *Conn) 
loadSession(hello *clientHelloMsg) (cacheKey string, // renegotiation is primarily used to allow a client to send a client // certificate, which would be skipped if session resumption occurred. if c.handshakes != 0 { - return "", nil, nil, nil + return "", nil, nil, nil, nil } // Try to resume a previously negotiated TLS session, if available. cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config) sess, ok := c.config.ClientSessionCache.Get(cacheKey) if !ok || sess == nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } session = fromClientSessionState(sess) @@ -362,7 +360,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, maxEarlyData, appData, ok = c.decodeSessionState(session) if !ok { // delete it, if parsing failed c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } } @@ -375,7 +373,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, } } if !versOk { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } // Check that the cached server certificate is not expired, and that it's @@ -384,16 +382,16 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, if !c.config.InsecureSkipVerify { if len(session.verifiedChains) == 0 { // The original connection had InsecureSkipVerify, while this doesn't. - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } serverCert := session.serverCertificates[0] if c.config.time().After(serverCert.NotAfter) { // Expired certificate, delete the entry. 
c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } if err := serverCert.VerifyHostname(c.config.ServerName); err != nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } } @@ -401,7 +399,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, // In TLS 1.2 the cipher suite must match the resumed session. Ensure we // are still offering it. if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } hello.sessionTicket = session.sessionTicket @@ -411,14 +409,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, // Check that the session ticket is not expired. if c.config.time().After(session.useBy) { c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } // In TLS 1.3 the KDF hash must match the resumed session. Ensure we // offer at least one cipher suite with that hash. cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite) if cipherSuite == nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } cipherSuiteOk := false for _, offeredID := range hello.cipherSuites { @@ -429,7 +427,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, } } if !cipherSuiteOk { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1. 
@@ -450,9 +448,15 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, hello.earlyData = c.extraConfig.Enable0RTT && maxEarlyData > 0 } transcript := cipherSuite.hash.New() - transcript.Write(hello.marshalWithoutBinders()) + helloBytes, err := hello.marshalWithoutBinders() + if err != nil { + return "", nil, nil, nil, err + } + transcript.Write(helloBytes) pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)} - hello.updateBinders(pskBinders) + if err := hello.updateBinders(pskBinders); err != nil { + return "", nil, nil, nil, err + } if session.vers == VersionTLS13 && c.extraConfig != nil && c.extraConfig.SetAppDataFromSessionState != nil { c.extraConfig.SetAppDataFromSessionState(appData) @@ -500,8 +504,12 @@ func (hs *clientHandshakeState) handshake() error { hs.finishedHash.discardHandshakeBuffer() } - hs.finishedHash.Write(hs.hello.marshal()) - hs.finishedHash.Write(hs.serverHello.marshal()) + if err := transcriptMsg(hs.hello, &hs.finishedHash); err != nil { + return err + } + if err := transcriptMsg(hs.serverHello, &hs.finishedHash); err != nil { + return err + } c.buffering = true c.didResume = isResume @@ -572,7 +580,7 @@ func (hs *clientHandshakeState) pickCipherSuite() error { func (hs *clientHandshakeState) doFullHandshake() error { c := hs.c - msg, err := c.readHandshake() + msg, err := c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -581,9 +589,8 @@ func (hs *clientHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(certMsg, msg) } - hs.finishedHash.Write(certMsg.marshal()) - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -601,11 +608,10 @@ func (hs *clientHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return errors.New("tls: received unexpected CertificateStatus message") } - hs.finishedHash.Write(cs.marshal()) c.ocspResponse = cs.response - msg, err = 
c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -634,14 +640,13 @@ func (hs *clientHandshakeState) doFullHandshake() error { skx, ok := msg.(*serverKeyExchangeMsg) if ok { - hs.finishedHash.Write(skx.marshal()) err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx) if err != nil { c.sendAlert(alertUnexpectedMessage) return err } - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -652,7 +657,6 @@ func (hs *clientHandshakeState) doFullHandshake() error { certReq, ok := msg.(*certificateRequestMsg) if ok { certRequested = true - hs.finishedHash.Write(certReq.marshal()) cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq) if chainToSend, err = c.getClientCertificate(cri); err != nil { @@ -660,7 +664,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -671,7 +675,6 @@ func (hs *clientHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(shd, msg) } - hs.finishedHash.Write(shd.marshal()) // If the server requested a certificate then we have to send a // Certificate message, even if it's empty because we don't have a @@ -679,8 +682,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { if certRequested { certMsg = new(certificateMsg) certMsg.certificates = chainToSend.Certificate - hs.finishedHash.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil { return err } } @@ -691,8 +693,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } if ckx != nil { - hs.finishedHash.Write(ckx.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != 
nil { + if _, err := hs.c.writeHandshakeRecord(ckx, &hs.finishedHash); err != nil { return err } } @@ -739,8 +740,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } - hs.finishedHash.Write(certVerify.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certVerify, &hs.finishedHash); err != nil { return err } } @@ -875,7 +875,10 @@ func (hs *clientHandshakeState) readFinished(out []byte) error { return err } - msg, err := c.readHandshake() + // finishedMsg is included in the transcript, but not until after we + // check the client version, since the state before this message was + // sent is used during verification. + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -891,7 +894,11 @@ func (hs *clientHandshakeState) readFinished(out []byte) error { c.sendAlert(alertHandshakeFailure) return errors.New("tls: server's Finished message was incorrect") } - hs.finishedHash.Write(serverFinished.marshal()) + + if err := transcriptMsg(serverFinished, &hs.finishedHash); err != nil { + return err + } + copy(out, verify) return nil } @@ -902,7 +909,7 @@ func (hs *clientHandshakeState) readSessionTicket() error { } c := hs.c - msg, err := c.readHandshake() + msg, err := c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -911,7 +918,6 @@ func (hs *clientHandshakeState) readSessionTicket() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(sessionTicketMsg, msg) } - hs.finishedHash.Write(sessionTicketMsg.marshal()) hs.session = &clientSessionState{ sessionTicket: sessionTicketMsg.ticket, @@ -931,20 +937,23 @@ func (hs *clientHandshakeState) readSessionTicket() error { func (hs *clientHandshakeState) sendFinished(out []byte) error { c := hs.c - if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil { + if err := c.writeChangeCipherRecord(); err != nil { return err } finished := new(finishedMsg) 
finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil { return err } copy(out, finished.verifyData) return nil } +// maxRSAKeySize is the maximum RSA key size in bits that we are willing +// to verify the signatures of during a TLS handshake. +const maxRSAKeySize = 8192 + // verifyServerCertificate parses and verifies the provided chain, setting // c.verifiedChains and c.peerCertificates or sending the appropriate alert. func (c *Conn) verifyServerCertificate(certificates [][]byte) error { @@ -955,6 +964,10 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error { c.sendAlert(alertBadCertificate) return errors.New("tls: failed to parse certificate from server: " + err.Error()) } + if cert.PublicKeyAlgorithm == x509.RSA && cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize { + c.sendAlert(alertBadCertificate) + return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize) + } certs[i] = cert } diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client_tls13.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_client_tls13.go similarity index 92% rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_client_tls13.go rename to vendor/github.com/quic-go/qtls-go1-19/handshake_client_tls13.go index 5c3ed0bd..05ca1333 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_client_tls13.go +++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_client_tls13.go @@ -65,7 +65,10 @@ func (hs *clientHandshakeStateTLS13) handshake() error { } hs.transcript = hs.suite.hash.New() - hs.transcript.Write(hs.hello.marshal()) + + if err := transcriptMsg(hs.hello, hs.transcript); err != nil { + return err + } if bytes.Equal(hs.serverHello.random, 
helloRetryRequestRandom) { if err := hs.sendDummyChangeCipherSpec(); err != nil { @@ -76,7 +79,9 @@ func (hs *clientHandshakeStateTLS13) handshake() error { } } - hs.transcript.Write(hs.serverHello.marshal()) + if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil { + return err + } c.buffering = true if err := hs.processServerHello(); err != nil { @@ -177,8 +182,7 @@ func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error { } hs.sentDummyCCS = true - _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - return err + return hs.c.writeChangeCipherRecord() } // processHelloRetryRequest handles the HRR in hs.serverHello, modifies and @@ -193,7 +197,9 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { hs.transcript.Reset() hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) hs.transcript.Write(chHash) - hs.transcript.Write(hs.serverHello.marshal()) + if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil { + return err + } // The only HelloRetryRequest extensions we support are key_share and // cookie, and clients must abort the handshake if the HRR would not result @@ -258,10 +264,18 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { transcript := hs.suite.hash.New() transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) transcript.Write(chHash) - transcript.Write(hs.serverHello.marshal()) - transcript.Write(hs.hello.marshalWithoutBinders()) + if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil { + return err + } + helloBytes, err := hs.hello.marshalWithoutBinders() + if err != nil { + return err + } + transcript.Write(helloBytes) pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)} - hs.hello.updateBinders(pskBinders) + if err := hs.hello.updateBinders(pskBinders); err != nil { + return err + } } else { // Server selected a cipher suite incompatible with the PSK. 
hs.hello.pskIdentities = nil @@ -273,13 +287,12 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { c.extraConfig.Rejected0RTT() } hs.hello.earlyData = false // disable 0-RTT - - hs.transcript.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil { return err } - msg, err := c.readHandshake() + // serverHelloMsg is not included in the transcript + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -368,6 +381,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { if !hs.usingPSK { earlySecret = hs.suite.extract(nil, nil) } + handshakeSecret := hs.suite.extract(sharedKey, hs.suite.deriveSecret(earlySecret, "derived", nil)) @@ -400,7 +414,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { func (hs *clientHandshakeStateTLS13) readServerParameters() error { c := hs.c - msg, err := c.readHandshake() + msg, err := c.readHandshake(hs.transcript) if err != nil { return err } @@ -418,7 +432,6 @@ func (hs *clientHandshakeStateTLS13) readServerParameters() error { if hs.c.extraConfig != nil && hs.c.extraConfig.ReceivedExtensions != nil { hs.c.extraConfig.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions) } - hs.transcript.Write(encryptedExtensions.marshal()) if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil { c.sendAlert(alertUnsupportedExtension) @@ -454,18 +467,16 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { return nil } - msg, err := c.readHandshake() + msg, err := c.readHandshake(hs.transcript) if err != nil { return err } certReq, ok := msg.(*certificateRequestMsgTLS13) if ok { - hs.transcript.Write(certReq.marshal()) - hs.certReq = certReq - msg, err = c.readHandshake() + msg, err = c.readHandshake(hs.transcript) if err != nil { return err } @@ -480,7 +491,6 
@@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { c.sendAlert(alertDecodeError) return errors.New("tls: received empty certificates message") } - hs.transcript.Write(certMsg.marshal()) c.scts = certMsg.certificate.SignedCertificateTimestamps c.ocspResponse = certMsg.certificate.OCSPStaple @@ -489,7 +499,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { return err } - msg, err = c.readHandshake() + // certificateVerifyMsg is included in the transcript, but not until + // after we verify the handshake signature, since the state before + // this message was sent is used. + msg, err = c.readHandshake(nil) if err != nil { return err } @@ -520,7 +533,9 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { return errors.New("tls: invalid signature by the server certificate: " + err.Error()) } - hs.transcript.Write(certVerify.marshal()) + if err := transcriptMsg(certVerify, hs.transcript); err != nil { + return err + } return nil } @@ -528,7 +543,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { func (hs *clientHandshakeStateTLS13) readServerFinished() error { c := hs.c - msg, err := c.readHandshake() + // finishedMsg is included in the transcript, but not until after we + // check the client version, since the state before this message was + // sent is used during verification. + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -545,7 +563,9 @@ func (hs *clientHandshakeStateTLS13) readServerFinished() error { return errors.New("tls: invalid server finished hash") } - hs.transcript.Write(finished.marshal()) + if err := transcriptMsg(finished, hs.transcript); err != nil { + return err + } // Derive secrets that take context through the server Finished. 
@@ -595,8 +615,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error { certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0 certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0 - hs.transcript.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil { return err } @@ -633,8 +652,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error { } certVerifyMsg.signature = sig - hs.transcript.Write(certVerifyMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil { return err } @@ -648,8 +666,7 @@ func (hs *clientHandshakeStateTLS13) sendClientFinished() error { verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript), } - hs.transcript.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil { return err } diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_messages.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_messages.go similarity index 76% rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_messages.go rename to vendor/github.com/quic-go/qtls-go1-19/handshake_messages.go index 07193c8e..c69fcefd 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_messages.go +++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_messages.go @@ -5,6 +5,7 @@ package qtls import ( + "errors" "fmt" "strings" @@ -95,9 +96,187 @@ type clientHelloMsg struct { additionalExtensions []Extension } -func (m *clientHelloMsg) marshal() []byte { +func (m *clientHelloMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil + } + + var exts 
cryptobyte.Builder + if len(m.serverName) > 0 { + // RFC 6066, Section 3 + exts.AddUint16(extensionServerName) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8(0) // name_type = host_name + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes([]byte(m.serverName)) + }) + }) + }) + } + if m.ocspStapling { + // RFC 4366, Section 3.6 + exts.AddUint16(extensionStatusRequest) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8(1) // status_type = ocsp + exts.AddUint16(0) // empty responder_id_list + exts.AddUint16(0) // empty request_extensions + }) + } + if len(m.supportedCurves) > 0 { + // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7 + exts.AddUint16(extensionSupportedCurves) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, curve := range m.supportedCurves { + exts.AddUint16(uint16(curve)) + } + }) + }) + } + if len(m.supportedPoints) > 0 { + // RFC 4492, Section 5.1.2 + exts.AddUint16(extensionSupportedPoints) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.supportedPoints) + }) + }) + } + if m.ticketSupported { + // RFC 5077, Section 3.2 + exts.AddUint16(extensionSessionTicket) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.sessionTicket) + }) + } + if len(m.supportedSignatureAlgorithms) > 0 { + // RFC 5246, Section 7.4.1.4.1 + exts.AddUint16(extensionSignatureAlgorithms) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, sigAlgo := range m.supportedSignatureAlgorithms { + exts.AddUint16(uint16(sigAlgo)) + } + }) + }) + } + if len(m.supportedSignatureAlgorithmsCert) > 0 { + // RFC 8446, Section 4.2.3 + 
exts.AddUint16(extensionSignatureAlgorithmsCert) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, sigAlgo := range m.supportedSignatureAlgorithmsCert { + exts.AddUint16(uint16(sigAlgo)) + } + }) + }) + } + if m.secureRenegotiationSupported { + // RFC 5746, Section 3.2 + exts.AddUint16(extensionRenegotiationInfo) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.secureRenegotiation) + }) + }) + } + if len(m.alpnProtocols) > 0 { + // RFC 7301, Section 3.1 + exts.AddUint16(extensionALPN) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, proto := range m.alpnProtocols { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes([]byte(proto)) + }) + } + }) + }) + } + if m.scts { + // RFC 6962, Section 3.3.1 + exts.AddUint16(extensionSCT) + exts.AddUint16(0) // empty extension_data + } + if len(m.supportedVersions) > 0 { + // RFC 8446, Section 4.2.1 + exts.AddUint16(extensionSupportedVersions) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, vers := range m.supportedVersions { + exts.AddUint16(vers) + } + }) + }) + } + if len(m.cookie) > 0 { + // RFC 8446, Section 4.2.2 + exts.AddUint16(extensionCookie) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.cookie) + }) + }) + } + if len(m.keyShares) > 0 { + // RFC 8446, Section 4.2.8 + exts.AddUint16(extensionKeyShare) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, ks := range m.keyShares { + exts.AddUint16(uint16(ks.group)) + exts.AddUint16LengthPrefixed(func(exts 
*cryptobyte.Builder) { + exts.AddBytes(ks.data) + }) + } + }) + }) + } + if m.earlyData { + // RFC 8446, Section 4.2.10 + exts.AddUint16(extensionEarlyData) + exts.AddUint16(0) // empty extension_data + } + if len(m.pskModes) > 0 { + // RFC 8446, Section 4.2.9 + exts.AddUint16(extensionPSKModes) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.pskModes) + }) + }) + } + for _, ext := range m.additionalExtensions { + exts.AddUint16(ext.Type) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(ext.Data) + }) + } + if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension + // RFC 8446, Section 4.2.11 + exts.AddUint16(extensionPreSharedKey) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, psk := range m.pskIdentities { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(psk.label) + }) + exts.AddUint32(psk.obfuscatedTicketAge) + } + }) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, binder := range m.pskBinders { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(binder) + }) + } + }) + }) + } + extBytes, err := exts.Bytes() + if err != nil { + return nil, err } var b cryptobyte.Builder @@ -117,225 +296,53 @@ func (m *clientHelloMsg) marshal() []byte { b.AddBytes(m.compressionMethods) }) - // If extensions aren't present, omit them. 
- var extensionsPresent bool - bWithoutExtensions := *b - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if len(m.serverName) > 0 { - // RFC 6066, Section 3 - b.AddUint16(extensionServerName) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(0) // name_type = host_name - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.serverName)) - }) - }) - }) - } - if m.ocspStapling { - // RFC 4366, Section 3.6 - b.AddUint16(extensionStatusRequest) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(1) // status_type = ocsp - b.AddUint16(0) // empty responder_id_list - b.AddUint16(0) // empty request_extensions - }) - } - if len(m.supportedCurves) > 0 { - // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7 - b.AddUint16(extensionSupportedCurves) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, curve := range m.supportedCurves { - b.AddUint16(uint16(curve)) - } - }) - }) - } - if len(m.supportedPoints) > 0 { - // RFC 4492, Section 5.1.2 - b.AddUint16(extensionSupportedPoints) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.supportedPoints) - }) - }) - } - if m.ticketSupported { - // RFC 5077, Section 3.2 - b.AddUint16(extensionSessionTicket) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.sessionTicket) - }) - } - if len(m.supportedSignatureAlgorithms) > 0 { - // RFC 5246, Section 7.4.1.4.1 - b.AddUint16(extensionSignatureAlgorithms) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithms { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if len(m.supportedSignatureAlgorithmsCert) > 0 { - // RFC 8446, Section 4.2.3 - 
b.AddUint16(extensionSignatureAlgorithmsCert) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithmsCert { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if m.secureRenegotiationSupported { - // RFC 5746, Section 3.2 - b.AddUint16(extensionRenegotiationInfo) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.secureRenegotiation) - }) - }) - } - if len(m.alpnProtocols) > 0 { - // RFC 7301, Section 3.1 - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, proto := range m.alpnProtocols { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(proto)) - }) - } - }) - }) - } - if m.scts { - // RFC 6962, Section 3.3.1 - b.AddUint16(extensionSCT) - b.AddUint16(0) // empty extension_data - } - if len(m.supportedVersions) > 0 { - // RFC 8446, Section 4.2.1 - b.AddUint16(extensionSupportedVersions) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - for _, vers := range m.supportedVersions { - b.AddUint16(vers) - } - }) - }) - } - if len(m.cookie) > 0 { - // RFC 8446, Section 4.2.2 - b.AddUint16(extensionCookie) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.cookie) - }) - }) - } - if len(m.keyShares) > 0 { - // RFC 8446, Section 4.2.8 - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, ks := range m.keyShares { - b.AddUint16(uint16(ks.group)) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ks.data) - }) - } - }) - }) - } - if m.earlyData { - // RFC 8446, Section 4.2.10 - 
b.AddUint16(extensionEarlyData) - b.AddUint16(0) // empty extension_data - } - if len(m.pskModes) > 0 { - // RFC 8446, Section 4.2.9 - b.AddUint16(extensionPSKModes) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.pskModes) - }) - }) - } - for _, ext := range m.additionalExtensions { - b.AddUint16(ext.Type) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ext.Data) - }) - } - if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension - // RFC 8446, Section 4.2.11 - b.AddUint16(extensionPreSharedKey) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, psk := range m.pskIdentities { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(psk.label) - }) - b.AddUint32(psk.obfuscatedTicketAge) - } - }) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, binder := range m.pskBinders { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(binder) - }) - } - }) - }) - } - - extensionsPresent = len(b.BytesOrPanic()) > 2 - }) - - if !extensionsPresent { - *b = bWithoutExtensions + if len(extBytes) > 0 { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(extBytes) + }) } }) - m.raw = b.BytesOrPanic() - return m.raw + m.raw, err = b.Bytes() + return m.raw, err } // marshalWithoutBinders returns the ClientHello through the // PreSharedKeyExtension.identities field, according to RFC 8446, Section // 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length. 
-func (m *clientHelloMsg) marshalWithoutBinders() []byte { +func (m *clientHelloMsg) marshalWithoutBinders() ([]byte, error) { bindersLen := 2 // uint16 length prefix for _, binder := range m.pskBinders { bindersLen += 1 // uint8 length prefix bindersLen += len(binder) } - fullMessage := m.marshal() - return fullMessage[:len(fullMessage)-bindersLen] + fullMessage, err := m.marshal() + if err != nil { + return nil, err + } + return fullMessage[:len(fullMessage)-bindersLen], nil } // updateBinders updates the m.pskBinders field, if necessary updating the // cached marshaled representation. The supplied binders must have the same // length as the current m.pskBinders. -func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) { +func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) error { if len(pskBinders) != len(m.pskBinders) { - panic("tls: internal error: pskBinders length mismatch") + return errors.New("tls: internal error: pskBinders length mismatch") } for i := range m.pskBinders { if len(pskBinders[i]) != len(m.pskBinders[i]) { - panic("tls: internal error: pskBinders length mismatch") + return errors.New("tls: internal error: pskBinders length mismatch") } } m.pskBinders = pskBinders if m.raw != nil { - lenWithoutBinders := len(m.marshalWithoutBinders()) + helloBytes, err := m.marshalWithoutBinders() + if err != nil { + return err + } + lenWithoutBinders := len(helloBytes) b := cryptobyte.NewFixedBuilder(m.raw[:lenWithoutBinders]) b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { for _, binder := range m.pskBinders { @@ -345,9 +352,11 @@ func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) { } }) if out, err := b.Bytes(); err != nil || len(out) != len(m.raw) { - panic("tls: internal error: failed to update binders") + return errors.New("tls: internal error: failed to update binders") } } + + return nil } func (m *clientHelloMsg) unmarshal(data []byte) bool { @@ -625,9 +634,98 @@ type serverHelloMsg struct { selectedGroup CurveID } -func 
(m *serverHelloMsg) marshal() []byte { +func (m *serverHelloMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil + } + + var exts cryptobyte.Builder + if m.ocspStapling { + exts.AddUint16(extensionStatusRequest) + exts.AddUint16(0) // empty extension_data + } + if m.ticketSupported { + exts.AddUint16(extensionSessionTicket) + exts.AddUint16(0) // empty extension_data + } + if m.secureRenegotiationSupported { + exts.AddUint16(extensionRenegotiationInfo) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.secureRenegotiation) + }) + }) + } + if len(m.alpnProtocol) > 0 { + exts.AddUint16(extensionALPN) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes([]byte(m.alpnProtocol)) + }) + }) + }) + } + if len(m.scts) > 0 { + exts.AddUint16(extensionSCT) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, sct := range m.scts { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(sct) + }) + } + }) + }) + } + if m.supportedVersion != 0 { + exts.AddUint16(extensionSupportedVersions) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(m.supportedVersion) + }) + } + if m.serverShare.group != 0 { + exts.AddUint16(extensionKeyShare) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(uint16(m.serverShare.group)) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.serverShare.data) + }) + }) + } + if m.selectedIdentityPresent { + exts.AddUint16(extensionPreSharedKey) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(m.selectedIdentity) + }) + } + + if len(m.cookie) > 0 { + 
exts.AddUint16(extensionCookie) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.cookie) + }) + }) + } + if m.selectedGroup != 0 { + exts.AddUint16(extensionKeyShare) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(uint16(m.selectedGroup)) + }) + } + if len(m.supportedPoints) > 0 { + exts.AddUint16(extensionSupportedPoints) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.supportedPoints) + }) + }) + } + + extBytes, err := exts.Bytes() + if err != nil { + return nil, err } var b cryptobyte.Builder @@ -641,104 +739,15 @@ func (m *serverHelloMsg) marshal() []byte { b.AddUint16(m.cipherSuite) b.AddUint8(m.compressionMethod) - // If extensions aren't present, omit them. - var extensionsPresent bool - bWithoutExtensions := *b - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if m.ocspStapling { - b.AddUint16(extensionStatusRequest) - b.AddUint16(0) // empty extension_data - } - if m.ticketSupported { - b.AddUint16(extensionSessionTicket) - b.AddUint16(0) // empty extension_data - } - if m.secureRenegotiationSupported { - b.AddUint16(extensionRenegotiationInfo) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.secureRenegotiation) - }) - }) - } - if len(m.alpnProtocol) > 0 { - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.alpnProtocol)) - }) - }) - }) - } - if len(m.scts) > 0 { - b.AddUint16(extensionSCT) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sct := range m.scts { - b.AddUint16LengthPrefixed(func(b 
*cryptobyte.Builder) { - b.AddBytes(sct) - }) - } - }) - }) - } - if m.supportedVersion != 0 { - b.AddUint16(extensionSupportedVersions) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.supportedVersion) - }) - } - if m.serverShare.group != 0 { - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(uint16(m.serverShare.group)) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.serverShare.data) - }) - }) - } - if m.selectedIdentityPresent { - b.AddUint16(extensionPreSharedKey) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.selectedIdentity) - }) - } - - if len(m.cookie) > 0 { - b.AddUint16(extensionCookie) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.cookie) - }) - }) - } - if m.selectedGroup != 0 { - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(uint16(m.selectedGroup)) - }) - } - if len(m.supportedPoints) > 0 { - b.AddUint16(extensionSupportedPoints) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.supportedPoints) - }) - }) - } - - extensionsPresent = len(b.BytesOrPanic()) > 2 - }) - - if !extensionsPresent { - *b = bWithoutExtensions + if len(extBytes) > 0 { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(extBytes) + }) } }) - m.raw = b.BytesOrPanic() - return m.raw + m.raw, err = b.Bytes() + return m.raw, err } func (m *serverHelloMsg) unmarshal(data []byte) bool { @@ -865,9 +874,9 @@ type encryptedExtensionsMsg struct { additionalExtensions []Extension } -func (m *encryptedExtensionsMsg) marshal() []byte { +func (m *encryptedExtensionsMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -898,8 +907,9 @@ func (m 
*encryptedExtensionsMsg) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool { @@ -949,10 +959,10 @@ func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool { type endOfEarlyDataMsg struct{} -func (m *endOfEarlyDataMsg) marshal() []byte { +func (m *endOfEarlyDataMsg) marshal() ([]byte, error) { x := make([]byte, 4) x[0] = typeEndOfEarlyData - return x + return x, nil } func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool { @@ -964,9 +974,9 @@ type keyUpdateMsg struct { updateRequested bool } -func (m *keyUpdateMsg) marshal() []byte { +func (m *keyUpdateMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -979,8 +989,9 @@ func (m *keyUpdateMsg) marshal() []byte { } }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *keyUpdateMsg) unmarshal(data []byte) bool { @@ -1012,9 +1023,9 @@ type newSessionTicketMsgTLS13 struct { maxEarlyData uint32 } -func (m *newSessionTicketMsgTLS13) marshal() []byte { +func (m *newSessionTicketMsgTLS13) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1039,8 +1050,9 @@ func (m *newSessionTicketMsgTLS13) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool { @@ -1093,9 +1105,9 @@ type certificateRequestMsgTLS13 struct { certificateAuthorities [][]byte } -func (m *certificateRequestMsgTLS13) marshal() []byte { +func (m *certificateRequestMsgTLS13) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1154,8 +1166,9 @@ func (m *certificateRequestMsgTLS13) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return 
m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool { @@ -1239,9 +1252,9 @@ type certificateMsg struct { certificates [][]byte } -func (m *certificateMsg) marshal() (x []byte) { +func (m *certificateMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var i int @@ -1250,7 +1263,7 @@ func (m *certificateMsg) marshal() (x []byte) { } length := 3 + 3*len(m.certificates) + i - x = make([]byte, 4+length) + x := make([]byte, 4+length) x[0] = typeCertificate x[1] = uint8(length >> 16) x[2] = uint8(length >> 8) @@ -1271,7 +1284,7 @@ func (m *certificateMsg) marshal() (x []byte) { } m.raw = x - return + return m.raw, nil } func (m *certificateMsg) unmarshal(data []byte) bool { @@ -1318,9 +1331,9 @@ type certificateMsgTLS13 struct { scts bool } -func (m *certificateMsgTLS13) marshal() []byte { +func (m *certificateMsgTLS13) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1338,8 +1351,9 @@ func (m *certificateMsgTLS13) marshal() []byte { marshalCertificate(b, certificate) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) { @@ -1462,9 +1476,9 @@ type serverKeyExchangeMsg struct { key []byte } -func (m *serverKeyExchangeMsg) marshal() []byte { +func (m *serverKeyExchangeMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } length := len(m.key) x := make([]byte, length+4) @@ -1475,7 +1489,7 @@ func (m *serverKeyExchangeMsg) marshal() []byte { copy(x[4:], m.key) m.raw = x - return x + return x, nil } func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { @@ -1492,9 +1506,9 @@ type certificateStatusMsg struct { response []byte } -func (m *certificateStatusMsg) marshal() []byte { +func (m *certificateStatusMsg) marshal() ([]byte, error) 
{ if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1506,8 +1520,9 @@ func (m *certificateStatusMsg) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *certificateStatusMsg) unmarshal(data []byte) bool { @@ -1526,10 +1541,10 @@ func (m *certificateStatusMsg) unmarshal(data []byte) bool { type serverHelloDoneMsg struct{} -func (m *serverHelloDoneMsg) marshal() []byte { +func (m *serverHelloDoneMsg) marshal() ([]byte, error) { x := make([]byte, 4) x[0] = typeServerHelloDone - return x + return x, nil } func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { @@ -1541,9 +1556,9 @@ type clientKeyExchangeMsg struct { ciphertext []byte } -func (m *clientKeyExchangeMsg) marshal() []byte { +func (m *clientKeyExchangeMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } length := len(m.ciphertext) x := make([]byte, length+4) @@ -1554,7 +1569,7 @@ func (m *clientKeyExchangeMsg) marshal() []byte { copy(x[4:], m.ciphertext) m.raw = x - return x + return x, nil } func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { @@ -1575,9 +1590,9 @@ type finishedMsg struct { verifyData []byte } -func (m *finishedMsg) marshal() []byte { +func (m *finishedMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1586,8 +1601,9 @@ func (m *finishedMsg) marshal() []byte { b.AddBytes(m.verifyData) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *finishedMsg) unmarshal(data []byte) bool { @@ -1609,9 +1625,9 @@ type certificateRequestMsg struct { certificateAuthorities [][]byte } -func (m *certificateRequestMsg) marshal() (x []byte) { +func (m *certificateRequestMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } // See RFC 4346, Section 7.4.4. 
@@ -1626,7 +1642,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) { length += 2 + 2*len(m.supportedSignatureAlgorithms) } - x = make([]byte, 4+length) + x := make([]byte, 4+length) x[0] = typeCertificateRequest x[1] = uint8(length >> 16) x[2] = uint8(length >> 8) @@ -1661,7 +1677,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) { } m.raw = x - return + return m.raw, nil } func (m *certificateRequestMsg) unmarshal(data []byte) bool { @@ -1747,9 +1763,9 @@ type certificateVerifyMsg struct { signature []byte } -func (m *certificateVerifyMsg) marshal() (x []byte) { +func (m *certificateVerifyMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1763,8 +1779,9 @@ func (m *certificateVerifyMsg) marshal() (x []byte) { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *certificateVerifyMsg) unmarshal(data []byte) bool { @@ -1787,15 +1804,15 @@ type newSessionTicketMsg struct { ticket []byte } -func (m *newSessionTicketMsg) marshal() (x []byte) { +func (m *newSessionTicketMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } // See RFC 5077, Section 3.3. 
ticketLen := len(m.ticket) length := 2 + 4 + ticketLen - x = make([]byte, 4+length) + x := make([]byte, 4+length) x[0] = typeNewSessionTicket x[1] = uint8(length >> 16) x[2] = uint8(length >> 8) @@ -1806,7 +1823,7 @@ func (m *newSessionTicketMsg) marshal() (x []byte) { m.raw = x - return + return m.raw, nil } func (m *newSessionTicketMsg) unmarshal(data []byte) bool { @@ -1834,10 +1851,25 @@ func (m *newSessionTicketMsg) unmarshal(data []byte) bool { type helloRequestMsg struct { } -func (*helloRequestMsg) marshal() []byte { - return []byte{typeHelloRequest, 0, 0, 0} +func (*helloRequestMsg) marshal() ([]byte, error) { + return []byte{typeHelloRequest, 0, 0, 0}, nil } func (*helloRequestMsg) unmarshal(data []byte) bool { return len(data) == 4 } + +type transcriptHash interface { + Write([]byte) (int, error) +} + +// transcriptMsg is a helper used to marshal and hash messages which typically +// are not written to the wire, and as such aren't hashed during Conn.writeRecord. +func transcriptMsg(msg handshakeMessage, h transcriptHash) error { + data, err := msg.marshal() + if err != nil { + return err + } + h.Write(data) + return nil +} diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_server.go similarity index 91% rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_server.go rename to vendor/github.com/quic-go/qtls-go1-19/handshake_server.go index b363d53f..738fc947 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server.go +++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_server.go @@ -138,7 +138,9 @@ func (hs *serverHandshakeState) handshake() error { // readClientHello reads a ClientHello message and selects the protocol version. func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) { - msg, err := c.readHandshake() + // clientHelloMsg is included in the transcript, but we haven't initialized + // it yet. 
The respective handshake functions will record it themselves. + msg, err := c.readHandshake(nil) if err != nil { return nil, err } @@ -494,9 +496,10 @@ func (hs *serverHandshakeState) doResumeHandshake() error { hs.hello.ticketSupported = hs.sessionState.usedOldKey hs.finishedHash = newFinishedHash(c.vers, hs.suite) hs.finishedHash.discardHandshakeBuffer() - hs.finishedHash.Write(hs.clientHello.marshal()) - hs.finishedHash.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil { + return err + } + if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil { return err } @@ -534,24 +537,23 @@ func (hs *serverHandshakeState) doFullHandshake() error { // certificates won't be used. hs.finishedHash.discardHandshakeBuffer() } - hs.finishedHash.Write(hs.clientHello.marshal()) - hs.finishedHash.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil { + return err + } + if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil { return err } certMsg := new(certificateMsg) certMsg.certificates = hs.cert.Certificate - hs.finishedHash.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil { return err } if hs.hello.ocspStapling { certStatus := new(certificateStatusMsg) certStatus.response = hs.cert.OCSPStaple - hs.finishedHash.Write(certStatus.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certStatus, &hs.finishedHash); err != nil { return err } } @@ -563,8 +565,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { return err } if skx != nil { - 
hs.finishedHash.Write(skx.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(skx, &hs.finishedHash); err != nil { return err } } @@ -590,15 +591,13 @@ func (hs *serverHandshakeState) doFullHandshake() error { if c.config.ClientCAs != nil { certReq.certificateAuthorities = c.config.ClientCAs.Subjects() } - hs.finishedHash.Write(certReq.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certReq, &hs.finishedHash); err != nil { return err } } helloDone := new(serverHelloDoneMsg) - hs.finishedHash.Write(helloDone.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(helloDone, &hs.finishedHash); err != nil { return err } @@ -608,7 +607,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { var pub crypto.PublicKey // public key for client auth, if any - msg, err := c.readHandshake() + msg, err := c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -621,7 +620,6 @@ func (hs *serverHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(certMsg, msg) } - hs.finishedHash.Write(certMsg.marshal()) if err := c.processCertsFromClient(Certificate{ Certificate: certMsg.certificates, @@ -632,7 +630,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { pub = c.peerCertificates[0].PublicKey } - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -650,7 +648,6 @@ func (hs *serverHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(ckx, msg) } - hs.finishedHash.Write(ckx.marshal()) preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers) if err != nil { @@ -670,7 +667,10 @@ func (hs *serverHandshakeState) 
doFullHandshake() error { // to the client's certificate. This allows us to verify that the client is in // possession of the private key of the certificate. if len(c.peerCertificates) > 0 { - msg, err = c.readHandshake() + // certificateVerifyMsg is included in the transcript, but not until + // after we verify the handshake signature, since the state before + // this message was sent is used. + msg, err = c.readHandshake(nil) if err != nil { return err } @@ -705,7 +705,9 @@ func (hs *serverHandshakeState) doFullHandshake() error { return errors.New("tls: invalid signature by the client certificate: " + err.Error()) } - hs.finishedHash.Write(certVerify.marshal()) + if err := transcriptMsg(certVerify, &hs.finishedHash); err != nil { + return err + } } hs.finishedHash.discardHandshakeBuffer() @@ -745,7 +747,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error { return err } - msg, err := c.readHandshake() + // finishedMsg is included in the transcript, but not until after we + // check the client version, since the state before this message was + // sent is used during verification. 
+ msg, err := c.readHandshake(nil) if err != nil { return err } @@ -762,7 +767,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error { return errors.New("tls: client's Finished message is incorrect") } - hs.finishedHash.Write(clientFinished.marshal()) + if err := transcriptMsg(clientFinished, &hs.finishedHash); err != nil { + return err + } + copy(out, verify) return nil } @@ -796,14 +804,16 @@ func (hs *serverHandshakeState) sendSessionTicket() error { masterSecret: hs.masterSecret, certificates: certsFromClient, } - var err error - m.ticket, err = c.encryptTicket(state.marshal()) + stateBytes, err := state.marshal() + if err != nil { + return err + } + m.ticket, err = c.encryptTicket(stateBytes) if err != nil { return err } - hs.finishedHash.Write(m.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(m, &hs.finishedHash); err != nil { return err } @@ -813,14 +823,13 @@ func (hs *serverHandshakeState) sendSessionTicket() error { func (hs *serverHandshakeState) sendFinished(out []byte) error { c := hs.c - if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil { + if err := c.writeChangeCipherRecord(); err != nil { return err } finished := new(finishedMsg) finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil { return err } @@ -841,6 +850,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error { c.sendAlert(alertBadCertificate) return errors.New("tls: failed to parse client certificate: " + err.Error()) } + if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize { + c.sendAlert(alertBadCertificate) + return fmt.Errorf("tls: client sent certificate containing RSA key 
larger than %d bits", maxRSAKeySize) + } } if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) { diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server_tls13.go b/vendor/github.com/quic-go/qtls-go1-19/handshake_server_tls13.go similarity index 92% rename from vendor/github.com/marten-seemann/qtls-go1-19/handshake_server_tls13.go rename to vendor/github.com/quic-go/qtls-go1-19/handshake_server_tls13.go index 38017776..c4706c44 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/handshake_server_tls13.go +++ b/vendor/github.com/quic-go/qtls-go1-19/handshake_server_tls13.go @@ -147,27 +147,14 @@ func (hs *serverHandshakeStateTLS13) processClientHello() error { hs.hello.sessionId = hs.clientHello.sessionId hs.hello.compressionMethod = compressionNone - if hs.suite == nil { - var preferenceList []uint16 - for _, suiteID := range c.config.CipherSuites { - for _, suite := range cipherSuitesTLS13 { - if suite.id == suiteID { - preferenceList = append(preferenceList, suiteID) - break - } - } - } - if len(preferenceList) == 0 { - preferenceList = defaultCipherSuitesTLS13 - if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) { - preferenceList = defaultCipherSuitesTLS13NoAES - } - } - for _, suiteID := range preferenceList { - hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID) - if hs.suite != nil { - break - } + preferenceList := defaultCipherSuitesTLS13 + if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) { + preferenceList = defaultCipherSuitesTLS13NoAES + } + for _, suiteID := range preferenceList { + hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID) + if hs.suite != nil { + break } } if hs.suite == nil { @@ -334,7 +321,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error { c.sendAlert(alertInternalError) return errors.New("tls: internal error: failed to clone hash") } - 
transcript.Write(hs.clientHello.marshalWithoutBinders()) + clientHelloBytes, err := hs.clientHello.marshalWithoutBinders() + if err != nil { + c.sendAlert(alertInternalError) + return err + } + transcript.Write(clientHelloBytes) pskBinder := hs.suite.finishedHash(binderKey, transcript) if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) { c.sendAlert(alertDecryptError) @@ -347,7 +339,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error { } h := cloneHash(hs.transcript, hs.suite.hash) - h.Write(hs.clientHello.marshal()) + clientHelloWithBindersBytes, err := hs.clientHello.marshal() + if err != nil { + c.sendAlert(alertInternalError) + return err + } + h.Write(clientHelloWithBindersBytes) if hs.encryptedExtensions.earlyData { clientEarlySecret := hs.suite.deriveSecret(hs.earlySecret, "c e traffic", h) c.in.exportKey(Encryption0RTT, hs.suite, clientEarlySecret) @@ -436,8 +433,7 @@ func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error { } hs.sentDummyCCS = true - _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - return err + return hs.c.writeChangeCipherRecord() } func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error { @@ -445,7 +441,9 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) // The first ClientHello gets double-hashed into the transcript upon a // HelloRetryRequest. See RFC 8446, Section 4.4.1. 
- hs.transcript.Write(hs.clientHello.marshal()) + if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil { + return err + } chHash := hs.transcript.Sum(nil) hs.transcript.Reset() hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) @@ -461,8 +459,7 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) selectedGroup: selectedGroup, } - hs.transcript.Write(helloRetryRequest.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(helloRetryRequest, hs.transcript); err != nil { return err } @@ -470,7 +467,8 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) return err } - msg, err := c.readHandshake() + // clientHelloMsg is not included in the transcript. + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -566,9 +564,10 @@ func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool { func (hs *serverHandshakeStateTLS13) sendServerParameters() error { c := hs.c - hs.transcript.Write(hs.clientHello.marshal()) - hs.transcript.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil { + return err + } + if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil { return err } @@ -611,8 +610,7 @@ func (hs *serverHandshakeStateTLS13) sendServerParameters() error { hs.encryptedExtensions.additionalExtensions = hs.c.extraConfig.GetExtensions(typeEncryptedExtensions) } - hs.transcript.Write(hs.encryptedExtensions.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.encryptedExtensions.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(hs.encryptedExtensions, hs.transcript); err != nil { return err } @@ -641,8 +639,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { certReq.certificateAuthorities = 
c.config.ClientCAs.Subjects() } - hs.transcript.Write(certReq.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certReq, hs.transcript); err != nil { return err } } @@ -653,8 +650,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0 certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 - hs.transcript.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil { return err } @@ -685,8 +681,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { } certVerifyMsg.signature = sig - hs.transcript.Write(certVerifyMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil { return err } @@ -700,8 +695,7 @@ func (hs *serverHandshakeStateTLS13) sendServerFinished() error { verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript), } - hs.transcript.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil { return err } @@ -763,7 +757,9 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error { finishedMsg := &finishedMsg{ verifyData: hs.clientFinished, } - hs.transcript.Write(finishedMsg.marshal()) + if err := transcriptMsg(finishedMsg, hs.transcript); err != nil { + return err + } if !hs.shouldSendSessionTickets() { return nil @@ -784,7 +780,7 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error { return err } - if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil { + if _, err := c.writeHandshakeRecord(m, 
nil); err != nil { return err } @@ -809,7 +805,7 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { // If we requested a client certificate, then the client must send a // certificate message. If it's empty, no CertificateVerify is sent. - msg, err := c.readHandshake() + msg, err := c.readHandshake(hs.transcript) if err != nil { return err } @@ -819,7 +815,6 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(certMsg, msg) } - hs.transcript.Write(certMsg.marshal()) if err := c.processCertsFromClient(certMsg.certificate); err != nil { return err @@ -833,7 +828,10 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { } if len(certMsg.certificate.Certificate) != 0 { - msg, err = c.readHandshake() + // certificateVerifyMsg is included in the transcript, but not until + // after we verify the handshake signature, since the state before + // this message was sent is used. + msg, err = c.readHandshake(nil) if err != nil { return err } @@ -864,7 +862,9 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { return errors.New("tls: invalid signature by the client certificate: " + err.Error()) } - hs.transcript.Write(certVerify.marshal()) + if err := transcriptMsg(certVerify, hs.transcript); err != nil { + return err + } } // If we waited until the client certificates to send session tickets, we @@ -879,7 +879,8 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { func (hs *serverHandshakeStateTLS13) readClientFinished() error { c := hs.c - msg, err := c.readHandshake() + // finishedMsg is not included in the transcript. 
+ msg, err := c.readHandshake(nil) if err != nil { return err } diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/key_agreement.go b/vendor/github.com/quic-go/qtls-go1-19/key_agreement.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/key_agreement.go rename to vendor/github.com/quic-go/qtls-go1-19/key_agreement.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go b/vendor/github.com/quic-go/qtls-go1-19/key_schedule.go similarity index 86% rename from vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go rename to vendor/github.com/quic-go/qtls-go1-19/key_schedule.go index da13904a..708bdc7c 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/key_schedule.go +++ b/vendor/github.com/quic-go/qtls-go1-19/key_schedule.go @@ -8,6 +8,7 @@ import ( "crypto/elliptic" "crypto/hmac" "errors" + "fmt" "hash" "io" "math/big" @@ -42,8 +43,24 @@ func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []by hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { b.AddBytes(context) }) + hkdfLabelBytes, err := hkdfLabel.Bytes() + if err != nil { + // Rather than calling BytesOrPanic, we explicitly handle this error, in + // order to provide a reasonable error message. It should be basically + // impossible for this to panic, and routing errors back through the + // tree rooted in this function is quite painful. The labels are fixed + // size, and the context is either a fixed-length computed hash, or + // parsed from a field which has the same length limitation. As such, an + // error here is likely to only be caused during development. + // + // NOTE: another reasonable approach here might be to return a + // randomized slice if we encounter an error, which would break the + // connection, but avoid panicking. This would perhaps be safer but + // significantly more confusing to users. 
+ panic(fmt.Errorf("failed to construct HKDF label: %s", err)) + } out := make([]byte, length) - n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out) + n, err := hkdf.Expand(c.hash.New, secret, hkdfLabelBytes).Read(out) if err != nil || n != length { panic("tls: HKDF-Expand-Label invocation failed unexpectedly") } diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/notboring.go b/vendor/github.com/quic-go/qtls-go1-19/notboring.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/notboring.go rename to vendor/github.com/quic-go/qtls-go1-19/notboring.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/prf.go b/vendor/github.com/quic-go/qtls-go1-19/prf.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/prf.go rename to vendor/github.com/quic-go/qtls-go1-19/prf.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/ticket.go b/vendor/github.com/quic-go/qtls-go1-19/ticket.go similarity index 96% rename from vendor/github.com/marten-seemann/qtls-go1-19/ticket.go rename to vendor/github.com/quic-go/qtls-go1-19/ticket.go index 81e8a52e..fe1c7a88 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/ticket.go +++ b/vendor/github.com/quic-go/qtls-go1-19/ticket.go @@ -34,7 +34,7 @@ type sessionState struct { usedOldKey bool } -func (m *sessionState) marshal() []byte { +func (m *sessionState) marshal() ([]byte, error) { var b cryptobyte.Builder b.AddUint16(m.vers) b.AddUint16(m.cipherSuite) @@ -49,7 +49,7 @@ func (m *sessionState) marshal() []byte { }) } }) - return b.BytesOrPanic() + return b.Bytes() } func (m *sessionState) unmarshal(data []byte) bool { @@ -94,7 +94,7 @@ type sessionStateTLS13 struct { appData []byte } -func (m *sessionStateTLS13) marshal() []byte { +func (m *sessionStateTLS13) marshal() ([]byte, error) { var b cryptobyte.Builder b.AddUint16(VersionTLS13) b.AddUint8(2) // revision @@ -111,7 +111,7 @@ func (m *sessionStateTLS13) marshal() []byte { 
b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { b.AddBytes(m.appData) }) - return b.BytesOrPanic() + return b.Bytes() } func (m *sessionStateTLS13) unmarshal(data []byte) bool { @@ -227,8 +227,11 @@ func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, e if c.extraConfig != nil { state.maxEarlyData = c.extraConfig.MaxEarlyData } - var err error - m.label, err = c.encryptTicket(state.marshal()) + stateBytes, err := state.marshal() + if err != nil { + return nil, err + } + m.label, err = c.encryptTicket(stateBytes) if err != nil { return nil, err } @@ -270,5 +273,5 @@ func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) { if err != nil { return nil, err } - return m.marshal(), nil + return m.marshal() } diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/tls.go b/vendor/github.com/quic-go/qtls-go1-19/tls.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/tls.go rename to vendor/github.com/quic-go/qtls-go1-19/tls.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/unsafe.go b/vendor/github.com/quic-go/qtls-go1-19/unsafe.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-18/unsafe.go rename to vendor/github.com/quic-go/qtls-go1-19/unsafe.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/LICENSE b/vendor/github.com/quic-go/qtls-go1-20/LICENSE similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/LICENSE rename to vendor/github.com/quic-go/qtls-go1-20/LICENSE diff --git a/vendor/github.com/quic-go/qtls-go1-20/README.md b/vendor/github.com/quic-go/qtls-go1-20/README.md new file mode 100644 index 00000000..2beaa2f2 --- /dev/null +++ b/vendor/github.com/quic-go/qtls-go1-20/README.md @@ -0,0 +1,6 @@ +# qtls + +[![Go Reference](https://pkg.go.dev/badge/github.com/quic-go/qtls-go1-20.svg)](https://pkg.go.dev/github.com/quic-go/qtls-go1-20) 
+[![.github/workflows/go-test.yml](https://github.com/quic-go/qtls-go1-20/actions/workflows/go-test.yml/badge.svg)](https://github.com/quic-go/qtls-go1-20/actions/workflows/go-test.yml) + +This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/quic-go/quic-go). diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/alert.go b/vendor/github.com/quic-go/qtls-go1-20/alert.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/alert.go rename to vendor/github.com/quic-go/qtls-go1-20/alert.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/auth.go b/vendor/github.com/quic-go/qtls-go1-20/auth.go similarity index 98% rename from vendor/github.com/marten-seemann/qtls-go1-18/auth.go rename to vendor/github.com/quic-go/qtls-go1-20/auth.go index 1ef675fd..effc9ace 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/auth.go +++ b/vendor/github.com/quic-go/qtls-go1-20/auth.go @@ -169,6 +169,7 @@ var rsaSignatureSchemes = []struct { // and optionally filtered by its explicit SupportedSignatureAlgorithms. // // This function must be kept in sync with supportedSignatureAlgorithms. +// FIPS filtering is applied in the caller, selectSignatureScheme. func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme { priv, ok := cert.PrivateKey.(crypto.Signer) if !ok { @@ -241,6 +242,9 @@ func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureSche // Pick signature scheme in the peer's preference order, as our // preference order is not configurable. 
for _, preferredAlg := range peerAlgs { + if needFIPS() && !isSupportedSignatureAlgorithm(preferredAlg, fipsSupportedSignatureAlgorithms) { + continue + } if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) { return preferredAlg, nil } diff --git a/vendor/github.com/quic-go/qtls-go1-20/cache.go b/vendor/github.com/quic-go/qtls-go1-20/cache.go new file mode 100644 index 00000000..99e0c5fb --- /dev/null +++ b/vendor/github.com/quic-go/qtls-go1-20/cache.go @@ -0,0 +1,95 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package qtls + +import ( + "crypto/x509" + "runtime" + "sync" + "sync/atomic" +) + +type cacheEntry struct { + refs atomic.Int64 + cert *x509.Certificate +} + +// certCache implements an intern table for reference counted x509.Certificates, +// implemented in a similar fashion to BoringSSL's CRYPTO_BUFFER_POOL. This +// allows for a single x509.Certificate to be kept in memory and referenced from +// multiple Conns. Returned references should not be mutated by callers. Certificates +// are still safe to use after they are removed from the cache. +// +// Certificates are returned wrapped in a activeCert struct that should be held by +// the caller. When references to the activeCert are freed, the number of references +// to the certificate in the cache is decremented. Once the number of references +// reaches zero, the entry is evicted from the cache. +// +// The main difference between this implementation and CRYPTO_BUFFER_POOL is that +// CRYPTO_BUFFER_POOL is a more generic structure which supports blobs of data, +// rather than specific structures. Since we only care about x509.Certificates, +// certCache is implemented as a specific cache, rather than a generic one. 
+// +// See https://boringssl.googlesource.com/boringssl/+/master/include/openssl/pool.h +// and https://boringssl.googlesource.com/boringssl/+/master/crypto/pool/pool.c +// for the BoringSSL reference. +type certCache struct { + sync.Map +} + +var clientCertCache = new(certCache) + +// activeCert is a handle to a certificate held in the cache. Once there are +// no alive activeCerts for a given certificate, the certificate is removed +// from the cache by a finalizer. +type activeCert struct { + cert *x509.Certificate +} + +// active increments the number of references to the entry, wraps the +// certificate in the entry in a activeCert, and sets the finalizer. +// +// Note that there is a race between active and the finalizer set on the +// returned activeCert, triggered if active is called after the ref count is +// decremented such that refs may be > 0 when evict is called. We consider this +// safe, since the caller holding an activeCert for an entry that is no longer +// in the cache is fine, with the only side effect being the memory overhead of +// there being more than one distinct reference to a certificate alive at once. +func (cc *certCache) active(e *cacheEntry) *activeCert { + e.refs.Add(1) + a := &activeCert{e.cert} + runtime.SetFinalizer(a, func(_ *activeCert) { + if e.refs.Add(-1) == 0 { + cc.evict(e) + } + }) + return a +} + +// evict removes a cacheEntry from the cache. +func (cc *certCache) evict(e *cacheEntry) { + cc.Delete(string(e.cert.Raw)) +} + +// newCert returns a x509.Certificate parsed from der. If there is already a copy +// of the certificate in the cache, a reference to the existing certificate will +// be returned. Otherwise, a fresh certificate will be added to the cache, and +// the reference returned. The returned reference should not be mutated. 
+func (cc *certCache) newCert(der []byte) (*activeCert, error) { + if entry, ok := cc.Load(string(der)); ok { + return cc.active(entry.(*cacheEntry)), nil + } + + cert, err := x509.ParseCertificate(der) + if err != nil { + return nil, err + } + + entry := &cacheEntry{cert: cert} + if entry, loaded := cc.LoadOrStore(string(der), entry); loaded { + return cc.active(entry.(*cacheEntry)), nil + } + return cc.active(entry), nil +} diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/cipher_suites.go b/vendor/github.com/quic-go/qtls-go1-20/cipher_suites.go similarity index 91% rename from vendor/github.com/marten-seemann/qtls-go1-18/cipher_suites.go rename to vendor/github.com/quic-go/qtls-go1-20/cipher_suites.go index e0be5147..43d21315 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/cipher_suites.go +++ b/vendor/github.com/quic-go/qtls-go1-20/cipher_suites.go @@ -226,57 +226,56 @@ var cipherSuitesTLS13 = []*cipherSuiteTLS13{ // TODO: replace with a map. // // - Anything else comes before RC4 // -// RC4 has practically exploitable biases. See https://www.rc4nomore.com. +// RC4 has practically exploitable biases. See https://www.rc4nomore.com. // // - Anything else comes before CBC_SHA256 // -// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13 -// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and -// https://www.imperialviolet.org/2013/02/04/luckythirteen.html. +// SHA-256 variants of the CBC ciphersuites don't implement any Lucky13 +// countermeasures. See http://www.isg.rhul.ac.uk/tls/Lucky13.html and +// https://www.imperialviolet.org/2013/02/04/luckythirteen.html. // // - Anything else comes before 3DES // -// 3DES has 64-bit blocks, which makes it fundamentally susceptible to -// birthday attacks. See https://sweet32.info. +// 3DES has 64-bit blocks, which makes it fundamentally susceptible to +// birthday attacks. See https://sweet32.info. 
// // - ECDHE comes before anything else // -// Once we got the broken stuff out of the way, the most important -// property a cipher suite can have is forward secrecy. We don't -// implement FFDHE, so that means ECDHE. +// Once we got the broken stuff out of the way, the most important +// property a cipher suite can have is forward secrecy. We don't +// implement FFDHE, so that means ECDHE. // // - AEADs come before CBC ciphers // -// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites -// are fundamentally fragile, and suffered from an endless sequence of -// padding oracle attacks. See https://eprint.iacr.org/2015/1129, -// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and -// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/. +// Even with Lucky13 countermeasures, MAC-then-Encrypt CBC cipher suites +// are fundamentally fragile, and suffered from an endless sequence of +// padding oracle attacks. See https://eprint.iacr.org/2015/1129, +// https://www.imperialviolet.org/2014/12/08/poodleagain.html, and +// https://blog.cloudflare.com/yet-another-padding-oracle-in-openssl-cbc-ciphersuites/. // // - AES comes before ChaCha20 // -// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster -// than ChaCha20Poly1305. +// When AES hardware is available, AES-128-GCM and AES-256-GCM are faster +// than ChaCha20Poly1305. // -// When AES hardware is not available, AES-128-GCM is one or more of: much -// slower, way more complex, and less safe (because not constant time) -// than ChaCha20Poly1305. +// When AES hardware is not available, AES-128-GCM is one or more of: much +// slower, way more complex, and less safe (because not constant time) +// than ChaCha20Poly1305. // -// We use this list if we think both peers have AES hardware, and -// cipherSuitesPreferenceOrderNoAES otherwise. 
+// We use this list if we think both peers have AES hardware, and +// cipherSuitesPreferenceOrderNoAES otherwise. // // - AES-128 comes before AES-256 // -// The only potential advantages of AES-256 are better multi-target -// margins, and hypothetical post-quantum properties. Neither apply to -// TLS, and AES-256 is slower due to its four extra rounds (which don't -// contribute to the advantages above). +// The only potential advantages of AES-256 are better multi-target +// margins, and hypothetical post-quantum properties. Neither apply to +// TLS, and AES-256 is slower due to its four extra rounds (which don't +// contribute to the advantages above). // // - ECDSA comes before RSA // -// The relative order of ECDSA and RSA cipher suites doesn't matter, -// as they depend on the certificate. Pick one to get a stable order. -// +// The relative order of ECDSA and RSA cipher suites doesn't matter, +// as they depend on the certificate. Pick one to get a stable order. var cipherSuitesPreferenceOrder = []uint16{ // AEADs w/ ECDHE TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, @@ -419,7 +418,9 @@ func cipherAES(key, iv []byte, isRead bool) any { // macSHA1 returns a SHA-1 based constant time MAC. func macSHA1(key []byte) hash.Hash { - return hmac.New(newConstantTimeHash(sha1.New), key) + h := sha1.New + h = newConstantTimeHash(h) + return hmac.New(h, key) } // macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and @@ -464,7 +465,7 @@ func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([ return f.aead.Open(out, f.nonce[:], ciphertext, additionalData) } -// xoredNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce +// xorNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce // before each call. 
type xorNonceAEAD struct { nonceMask [aeadNonceLength]byte @@ -507,7 +508,8 @@ func aeadAESGCM(key, noncePrefix []byte) aead { if err != nil { panic(err) } - aead, err := cipher.NewGCM(aes) + var aead cipher.AEAD + aead, err = cipher.NewGCM(aes) if err != nil { panic(err) } diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/common.go b/vendor/github.com/quic-go/qtls-go1-20/common.go similarity index 97% rename from vendor/github.com/marten-seemann/qtls-go1-18/common.go rename to vendor/github.com/quic-go/qtls-go1-20/common.go index 4c9aeeb4..074dd9dc 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/common.go +++ b/vendor/github.com/quic-go/qtls-go1-20/common.go @@ -181,11 +181,11 @@ const ( // hash function associated with the Ed25519 signature scheme. var directSigning crypto.Hash = 0 -// supportedSignatureAlgorithms contains the signature and hash algorithms that +// defaultSupportedSignatureAlgorithms contains the signature and hash algorithms that // the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+ // CertificateRequest. The two fields are merged to match with TLS 1.3. // Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc. -var supportedSignatureAlgorithms = []SignatureScheme{ +var defaultSupportedSignatureAlgorithms = []SignatureScheme{ PSSWithSHA256, ECDSAWithP256AndSHA256, Ed25519, @@ -258,6 +258,8 @@ type connectionState struct { // On the client side, it can't be empty. On the server side, it can be // empty if Config.ClientAuth is not RequireAnyClientCert or // RequireAndVerifyClientCert. + // + // PeerCertificates and its contents should not be modified. PeerCertificates []*x509.Certificate // VerifiedChains is a list of one or more chains where the first element is @@ -267,6 +269,8 @@ type connectionState struct { // On the client side, it's set if Config.InsecureSkipVerify is false. 
On // the server side, it's set if Config.ClientAuth is VerifyClientCertIfGiven // (and the peer provided a certificate) or RequireAndVerifyClientCert. + // + // VerifiedChains and its contents should not be modified. VerifiedChains [][]*x509.Certificate // SignedCertificateTimestamps is a list of SCTs provided by the peer @@ -345,7 +349,8 @@ type clientSessionState struct { // goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not // SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which // are supported via this interface. -//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/marten-seemann/qtls-go1-17 ClientSessionCache" +// +//go:generate sh -c "mockgen -package qtls -destination mock_client_session_cache_test.go github.com/quic-go/qtls-go1-20 ClientSessionCache" type ClientSessionCache = tls.ClientSessionCache // SignatureScheme is a tls.SignatureScheme @@ -543,6 +548,8 @@ type config struct { // If GetCertificate is nil or returns nil, then the certificate is // retrieved from NameToCertificate. If NameToCertificate is nil, the // best element of Certificates will be used. + // + // Once a Certificate is returned it should not be modified. GetCertificate func(*ClientHelloInfo) (*Certificate, error) // GetClientCertificate, if not nil, is called when a server requests a @@ -558,6 +565,8 @@ type config struct { // // GetClientCertificate may be called multiple times for the same // connection if renegotiation occurs or if TLS 1.3 is in use. + // + // Once a Certificate is returned it should not be modified. 
GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error) // GetConfigForClient, if not nil, is called after a ClientHello is @@ -586,6 +595,8 @@ type config struct { // setting InsecureSkipVerify, or (for a server) when ClientAuth is // RequestClientCert or RequireAnyClientCert, then this callback will // be considered but the verifiedChains argument will always be nil. + // + // verifiedChains and its contents should not be modified. VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error // VerifyConnection, if not nil, is called after normal certificate @@ -714,7 +725,7 @@ type config struct { // mutex protects sessionTicketKeys and autoSessionTicketKeys. mutex sync.RWMutex - // sessionTicketKeys contains zero or more ticket keys. If set, it means the + // sessionTicketKeys contains zero or more ticket keys. If set, it means // the keys were set with SessionTicketKey or SetSessionTicketKeys. The // first key is used for new tickets and any subsequent keys can be used to // decrypt old tickets. The slice contents are not protected by the mutex @@ -1037,6 +1048,9 @@ func (c *config) time() time.Time { } func (c *config) cipherSuites() []uint16 { + if needFIPS() { + return fipsCipherSuites(c) + } if c.CipherSuites != nil { return c.CipherSuites } @@ -1050,10 +1064,6 @@ var supportedVersions = []uint16{ VersionTLS10, } -// debugEnableTLS10 enables TLS 1.0. See issue 45428. -// We don't care about TLS1.0 in qtls. Always disable it. -var debugEnableTLS10 = false - // roleClient and roleServer are meant to call supportedVersions and parents // with more readability at the callsite. 
const roleClient = true @@ -1062,7 +1072,10 @@ const roleServer = false func (c *config) supportedVersions(isClient bool) []uint16 { versions := make([]uint16, 0, len(supportedVersions)) for _, v := range supportedVersions { - if (c == nil || c.MinVersion == 0) && !debugEnableTLS10 && + if needFIPS() && (v < fipsMinVersion(c) || v > fipsMaxVersion(c)) { + continue + } + if (c == nil || c.MinVersion == 0) && isClient && v < VersionTLS12 { continue } @@ -1102,6 +1115,9 @@ func supportedVersionsFromMax(maxVersion uint16) []uint16 { var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521} func (c *config) curvePreferences() []CurveID { + if needFIPS() { + return fipsCurvePreferences(c) + } if c == nil || len(c.CurvePreferences) == 0 { return defaultCurvePreferences } @@ -1380,7 +1396,7 @@ func (c *config) writeKeyLog(label string, clientRandom, secret []byte) error { return nil } - logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret)) + logLine := fmt.Appendf(nil, "%s %x %x\n", label, clientRandom, secret) writerMutex.Lock() _, err := c.KeyLogWriter.Write(logLine) @@ -1406,7 +1422,7 @@ func leafCertificate(c *Certificate) (*x509.Certificate, error) { } type handshakeMessage interface { - marshal() []byte + marshal() ([]byte, error) unmarshal([]byte) bool } @@ -1505,3 +1521,18 @@ func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlg } return false } + +// CertificateVerificationError is returned when certificate verification fails during the handshake. +type CertificateVerificationError struct { + // UnverifiedCertificates and its contents should not be modified. 
+ UnverifiedCertificates []*x509.Certificate + Err error +} + +func (e *CertificateVerificationError) Error() string { + return fmt.Sprintf("tls: failed to verify certificate: %s", e.Err) +} + +func (e *CertificateVerificationError) Unwrap() error { + return e.Err +} diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/conn.go b/vendor/github.com/quic-go/qtls-go1-20/conn.go similarity index 95% rename from vendor/github.com/marten-seemann/qtls-go1-18/conn.go rename to vendor/github.com/quic-go/qtls-go1-20/conn.go index 2b8c7307..656c83c7 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/conn.go +++ b/vendor/github.com/quic-go/qtls-go1-20/conn.go @@ -30,11 +30,10 @@ type Conn struct { isClient bool handshakeFn func(context.Context) error // (*Conn).clientHandshake or serverHandshake - // handshakeStatus is 1 if the connection is currently transferring + // isHandshakeComplete is true if the connection is currently transferring // application data (i.e. is not currently processing a handshake). - // handshakeStatus == 1 implies handshakeErr == nil. - // This field is only to be accessed with sync/atomic. - handshakeStatus uint32 + // isHandshakeComplete is true implies handshakeErr == nil. + isHandshakeComplete atomic.Bool // constant after handshake; protected by handshakeMutex handshakeMutex sync.Mutex handshakeErr error // error resulting from handshake @@ -52,6 +51,9 @@ type Conn struct { ocspResponse []byte // stapled OCSP response scts [][]byte // signed certificate timestamps from server peerCertificates []*x509.Certificate + // activeCertHandles contains the cache handles to certificates in + // peerCertificates that are used to track active references. + activeCertHandles []*activeCert // verifiedChains contains the certificate chains that we built, as // opposed to the ones presented by the server. verifiedChains [][]*x509.Certificate @@ -117,10 +119,9 @@ type Conn struct { // handshake, nor deliver application data. Protected by in.Mutex. 
retryCount int - // activeCall is an atomic int32; the low bit is whether Close has - // been called. the rest of the bits are the number of goroutines - // in Conn.Write. - activeCall int32 + // activeCall indicates whether Close has been call in the low bit. + // the rest of the bits are the number of goroutines in Conn.Write. + activeCall atomic.Int32 used0RTT bool @@ -621,12 +622,14 @@ func (c *Conn) readChangeCipherSpec() error { // readRecordOrCCS reads one or more TLS records from the connection and // updates the record layer state. Some invariants: -// * c.in must be locked -// * c.input must be empty +// - c.in must be locked +// - c.input must be empty +// // During the handshake one and only one of the following will happen: // - c.hand grows // - c.in.changeCipherSpec is called // - an error is returned +// // After the handshake one and only one of the following will happen: // - c.hand grows // - c.input is set @@ -635,7 +638,7 @@ func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error { if c.in.err != nil { return c.in.err } - handshakeComplete := c.handshakeComplete() + handshakeComplete := c.isHandshakeComplete.Load() // This function modifies c.rawInput, which owns the c.input memory. if c.input.Len() != 0 { @@ -792,7 +795,7 @@ func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error { return nil } -// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like +// retryReadRecord recurs into readRecordOrCCS to drop a non-advancing record, like // a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3. func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error { c.retryCount++ @@ -1039,25 +1042,46 @@ func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) { return n, nil } -// writeRecord writes a TLS record with the given type and payload to the -// connection and updates the record layer state. 
-func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) { +// writeHandshakeRecord writes a handshake message to the connection and updates +// the record layer state. If transcript is non-nil the marshalled message is +// written to it. +func (c *Conn) writeHandshakeRecord(msg handshakeMessage, transcript transcriptHash) (int, error) { + data, err := msg.marshal() + if err != nil { + return 0, err + } + + c.out.Lock() + defer c.out.Unlock() + + if transcript != nil { + transcript.Write(data) + } + if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil { - if typ == recordTypeChangeCipherSpec { - return len(data), nil - } return c.extraConfig.AlternativeRecordLayer.WriteRecord(data) } + return c.writeRecordLocked(recordTypeHandshake, data) +} + +// writeChangeCipherRecord writes a ChangeCipherSpec message to the connection and +// updates the record layer state. +func (c *Conn) writeChangeCipherRecord() error { + if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil { + return nil + } + c.out.Lock() defer c.out.Unlock() - - return c.writeRecordLocked(typ, data) + _, err := c.writeRecordLocked(recordTypeChangeCipherSpec, []byte{1}) + return err } // readHandshake reads the next handshake message from -// the record layer. -func (c *Conn) readHandshake() (any, error) { +// the record layer. If transcript is non-nil, the message +// is written to the passed transcriptHash. 
+func (c *Conn) readHandshake(transcript transcriptHash) (any, error) { var data []byte if c.extraConfig != nil && c.extraConfig.AlternativeRecordLayer != nil { var err error @@ -1145,6 +1169,11 @@ func (c *Conn) readHandshake() (any, error) { if !m.unmarshal(data) { return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) } + + if transcript != nil { + transcript.Write(data) + } + return m, nil } @@ -1161,15 +1190,15 @@ var ( func (c *Conn) Write(b []byte) (int, error) { // interlock with Close below for { - x := atomic.LoadInt32(&c.activeCall) + x := c.activeCall.Load() if x&1 != 0 { return 0, net.ErrClosed } - if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) { + if c.activeCall.CompareAndSwap(x, x+2) { break } } - defer atomic.AddInt32(&c.activeCall, -2) + defer c.activeCall.Add(-2) if err := c.Handshake(); err != nil { return 0, err @@ -1182,7 +1211,7 @@ func (c *Conn) Write(b []byte) (int, error) { return 0, err } - if !c.handshakeComplete() { + if !c.isHandshakeComplete.Load() { return 0, alertInternalError } @@ -1220,7 +1249,7 @@ func (c *Conn) handleRenegotiation() error { return errors.New("tls: internal error: unexpected renegotiation") } - msg, err := c.readHandshake() + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -1252,7 +1281,7 @@ func (c *Conn) handleRenegotiation() error { c.handshakeMutex.Lock() defer c.handshakeMutex.Unlock() - atomic.StoreUint32(&c.handshakeStatus, 0) + c.isHandshakeComplete.Store(false) if c.handshakeErr = c.clientHandshake(context.Background()); c.handshakeErr == nil { c.handshakes++ } @@ -1270,7 +1299,7 @@ func (c *Conn) handlePostHandshakeMessage() error { return c.handleRenegotiation() } - msg, err := c.readHandshake() + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -1306,7 +1335,11 @@ func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error { defer c.out.Unlock() msg := &keyUpdateMsg{} - _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal()) + msgBytes, 
err := msg.marshal() + if err != nil { + return err + } + _, err = c.writeRecordLocked(recordTypeHandshake, msgBytes) if err != nil { // Surface the error at the next write. c.out.setErrorLocked(err) @@ -1374,11 +1407,11 @@ func (c *Conn) Close() error { // Interlock with Conn.Write above. var x int32 for { - x = atomic.LoadInt32(&c.activeCall) + x = c.activeCall.Load() if x&1 != 0 { return net.ErrClosed } - if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) { + if c.activeCall.CompareAndSwap(x, x|1) { break } } @@ -1393,7 +1426,7 @@ func (c *Conn) Close() error { } var alertErr error - if c.handshakeComplete() { + if c.isHandshakeComplete.Load() { if err := c.closeNotify(); err != nil { alertErr = fmt.Errorf("tls: failed to send closeNotify alert (but connection was closed anyway): %w", err) } @@ -1411,7 +1444,7 @@ var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake com // called once the handshake has completed and does not call CloseWrite on the // underlying connection. Most callers should just use Close. func (c *Conn) CloseWrite() error { - if !c.handshakeComplete() { + if !c.isHandshakeComplete.Load() { return errEarlyCloseWrite } @@ -1465,7 +1498,7 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) { // Fast sync/atomic-based exit if there is no handshake in flight and the // last one succeeded without an error. Avoids the expensive context setup // and mutex for most Read and Write calls. 
- if c.handshakeComplete() { + if c.isHandshakeComplete.Load() { return nil } @@ -1508,7 +1541,7 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) { if err := c.handshakeErr; err != nil { return err } - if c.handshakeComplete() { + if c.isHandshakeComplete.Load() { return nil } @@ -1524,10 +1557,10 @@ func (c *Conn) handshakeContext(ctx context.Context) (ret error) { c.flush() } - if c.handshakeErr == nil && !c.handshakeComplete() { + if c.handshakeErr == nil && !c.isHandshakeComplete.Load() { c.handshakeErr = errors.New("tls: internal error: handshake should have had a result") } - if c.handshakeErr != nil && c.handshakeComplete() { + if c.handshakeErr != nil && c.isHandshakeComplete.Load() { panic("tls: internal error: handshake returned an error but is marked successful") } @@ -1550,7 +1583,7 @@ func (c *Conn) ConnectionStateWith0RTT() ConnectionStateWith0RTT { func (c *Conn) connectionStateLocked() ConnectionState { var state connectionState - state.HandshakeComplete = c.handshakeComplete() + state.HandshakeComplete = c.isHandshakeComplete.Load() state.Version = c.vers state.NegotiatedProtocol = c.clientProtocol state.DidResume = c.didResume @@ -1603,7 +1636,7 @@ func (c *Conn) VerifyHostname(host string) error { if !c.isClient { return errors.New("tls: VerifyHostname called on TLS server connection") } - if !c.handshakeComplete() { + if !c.isHandshakeComplete.Load() { return errors.New("tls: handshake has not yet been performed") } if len(c.verifiedChains) == 0 { @@ -1611,7 +1644,3 @@ func (c *Conn) VerifyHostname(host string) error { } return c.peerCertificates[0].VerifyHostname(host) } - -func (c *Conn) handshakeComplete() bool { - return atomic.LoadUint32(&c.handshakeStatus) == 1 -} diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/cpu.go b/vendor/github.com/quic-go/qtls-go1-20/cpu.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/cpu.go rename to vendor/github.com/quic-go/qtls-go1-20/cpu.go diff 
--git a/vendor/github.com/marten-seemann/qtls-go1-19/cpu_other.go b/vendor/github.com/quic-go/qtls-go1-20/cpu_other.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/cpu_other.go rename to vendor/github.com/quic-go/qtls-go1-20/cpu_other.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_client.go similarity index 88% rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_client.go rename to vendor/github.com/quic-go/qtls-go1-20/handshake_client.go index a2a0eaea..ebb56ebe 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client.go +++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_client.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "crypto" + "crypto/ecdh" "crypto/ecdsa" "crypto/ed25519" "crypto/rsa" @@ -19,7 +20,6 @@ import ( "io" "net" "strings" - "sync/atomic" "time" "golang.org/x/crypto/cryptobyte" @@ -38,7 +38,9 @@ type clientHandshakeState struct { session *clientSessionState } -func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) { +var testingOnlyForceClientHelloSignatureAlgorithms []SignatureScheme + +func (c *Conn) makeClientHello() (*clientHelloMsg, *ecdh.PrivateKey, error) { config := c.config if len(config.ServerName) == 0 && !config.InsecureSkipVerify { return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config") @@ -134,45 +136,39 @@ func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) { } if hello.vers >= VersionTLS12 { - hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms + hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms() + } + if testingOnlyForceClientHelloSignatureAlgorithms != nil { + hello.supportedSignatureAlgorithms = testingOnlyForceClientHelloSignatureAlgorithms } - var params ecdheParameters + var key *ecdh.PrivateKey if hello.supportedVersions[0] == VersionTLS13 { - var 
suites []uint16 - for _, suiteID := range configCipherSuites { - for _, suite := range cipherSuitesTLS13 { - if suite.id == suiteID { - suites = append(suites, suiteID) - } - } + if len(hello.supportedVersions) == 1 { + hello.cipherSuites = hello.cipherSuites[:0] } - if len(suites) > 0 { - hello.cipherSuites = suites + if hasAESGCMHardwareSupport { + hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...) } else { - if hasAESGCMHardwareSupport { - hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13...) - } else { - hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...) - } + hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13NoAES...) } curveID := config.curvePreferences()[0] - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { + if _, ok := curveForCurveID(curveID); !ok { return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve") } - params, err = generateECDHEParameters(config.rand(), curveID) + key, err = generateECDHEKey(config.rand(), curveID) if err != nil { return nil, nil, err } - hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}} + hello.keyShares = []keyShare{{group: curveID, data: key.PublicKey().Bytes()}} } if hello.supportedVersions[0] == VersionTLS13 && c.extraConfig != nil && c.extraConfig.GetExtensions != nil { hello.additionalExtensions = c.extraConfig.GetExtensions(typeClientHello) } - return hello, params, nil + return hello, key, nil } func (c *Conn) clientHandshake(ctx context.Context) (err error) { @@ -185,13 +181,16 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { // need to be reset. 
c.didResume = false - hello, ecdheParams, err := c.makeClientHello() + hello, ecdheKey, err := c.makeClientHello() if err != nil { return err } c.serverName = hello.serverName - cacheKey, session, earlySecret, binderKey := c.loadSession(hello) + cacheKey, session, earlySecret, binderKey, err := c.loadSession(hello) + if err != nil { + return err + } if cacheKey != "" && session != nil { var deletedTicket bool if session.vers == VersionTLS13 && hello.earlyData && c.extraConfig != nil && c.extraConfig.Enable0RTT { @@ -201,11 +200,14 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { if suite := cipherSuiteTLS13ByID(session.cipherSuite); suite != nil { h := suite.hash.New() - h.Write(hello.marshal()) + helloBytes, err := hello.marshal() + if err != nil { + return err + } + h.Write(helloBytes) clientEarlySecret := suite.deriveSecret(earlySecret, "c e traffic", h) c.out.exportKey(Encryption0RTT, suite, clientEarlySecret) if err := c.config.writeKeyLog(keyLogLabelEarlyTraffic, hello.random, clientEarlySecret); err != nil { - c.sendAlert(alertInternalError) return err } } @@ -225,11 +227,12 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { } } - if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil { + if _, err := c.writeHandshakeRecord(hello, nil); err != nil { return err } - msg, err := c.readHandshake() + // serverHelloMsg is not included in the transcript + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -262,7 +265,7 @@ func (c *Conn) clientHandshake(ctx context.Context) (err error) { ctx: ctx, serverHello: serverHello, hello: hello, - ecdheParams: ecdheParams, + ecdheKey: ecdheKey, session: session, earlySecret: earlySecret, binderKey: binderKey, @@ -322,9 +325,9 @@ func (c *Conn) decodeSessionState(session *clientSessionState) (uint32 /* max ea } func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, - session *clientSessionState, earlySecret, binderKey []byte) { + session 
*clientSessionState, earlySecret, binderKey []byte, err error) { if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil { - return "", nil, nil, nil + return "", nil, nil, nil, nil } hello.ticketSupported = true @@ -339,14 +342,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, // renegotiation is primarily used to allow a client to send a client // certificate, which would be skipped if session resumption occurred. if c.handshakes != 0 { - return "", nil, nil, nil + return "", nil, nil, nil, nil } // Try to resume a previously negotiated TLS session, if available. cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config) sess, ok := c.config.ClientSessionCache.Get(cacheKey) if !ok || sess == nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } session = fromClientSessionState(sess) @@ -357,7 +360,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, maxEarlyData, appData, ok = c.decodeSessionState(session) if !ok { // delete it, if parsing failed c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } } @@ -370,7 +373,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, } } if !versOk { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } // Check that the cached server certificate is not expired, and that it's @@ -379,16 +382,16 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, if !c.config.InsecureSkipVerify { if len(session.verifiedChains) == 0 { // The original connection had InsecureSkipVerify, while this doesn't. - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } serverCert := session.serverCertificates[0] if c.config.time().After(serverCert.NotAfter) { // Expired certificate, delete the entry. 
c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } if err := serverCert.VerifyHostname(c.config.ServerName); err != nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } } @@ -396,7 +399,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, // In TLS 1.2 the cipher suite must match the resumed session. Ensure we // are still offering it. if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } hello.sessionTicket = session.sessionTicket @@ -406,14 +409,14 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, // Check that the session ticket is not expired. if c.config.time().After(session.useBy) { c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } // In TLS 1.3 the KDF hash must match the resumed session. Ensure we // offer at least one cipher suite with that hash. cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite) if cipherSuite == nil { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } cipherSuiteOk := false for _, offeredID := range hello.cipherSuites { @@ -424,7 +427,7 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, } } if !cipherSuiteOk { - return cacheKey, nil, nil, nil + return cacheKey, nil, nil, nil, nil } // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1. 
@@ -445,9 +448,15 @@ func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, hello.earlyData = c.extraConfig.Enable0RTT && maxEarlyData > 0 } transcript := cipherSuite.hash.New() - transcript.Write(hello.marshalWithoutBinders()) + helloBytes, err := hello.marshalWithoutBinders() + if err != nil { + return "", nil, nil, nil, err + } + transcript.Write(helloBytes) pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)} - hello.updateBinders(pskBinders) + if err := hello.updateBinders(pskBinders); err != nil { + return "", nil, nil, nil, err + } if session.vers == VersionTLS13 && c.extraConfig != nil && c.extraConfig.SetAppDataFromSessionState != nil { c.extraConfig.SetAppDataFromSessionState(appData) @@ -495,8 +504,12 @@ func (hs *clientHandshakeState) handshake() error { hs.finishedHash.discardHandshakeBuffer() } - hs.finishedHash.Write(hs.hello.marshal()) - hs.finishedHash.Write(hs.serverHello.marshal()) + if err := transcriptMsg(hs.hello, &hs.finishedHash); err != nil { + return err + } + if err := transcriptMsg(hs.serverHello, &hs.finishedHash); err != nil { + return err + } c.buffering = true c.didResume = isResume @@ -549,7 +562,7 @@ func (hs *clientHandshakeState) handshake() error { } c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random) - atomic.StoreUint32(&c.handshakeStatus, 1) + c.isHandshakeComplete.Store(true) return nil } @@ -567,7 +580,7 @@ func (hs *clientHandshakeState) pickCipherSuite() error { func (hs *clientHandshakeState) doFullHandshake() error { c := hs.c - msg, err := c.readHandshake() + msg, err := c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -576,9 +589,8 @@ func (hs *clientHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(certMsg, msg) } - hs.finishedHash.Write(certMsg.marshal()) - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } 
@@ -596,11 +608,10 @@ func (hs *clientHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return errors.New("tls: received unexpected CertificateStatus message") } - hs.finishedHash.Write(cs.marshal()) c.ocspResponse = cs.response - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -629,14 +640,13 @@ func (hs *clientHandshakeState) doFullHandshake() error { skx, ok := msg.(*serverKeyExchangeMsg) if ok { - hs.finishedHash.Write(skx.marshal()) err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx) if err != nil { c.sendAlert(alertUnexpectedMessage) return err } - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -647,7 +657,6 @@ func (hs *clientHandshakeState) doFullHandshake() error { certReq, ok := msg.(*certificateRequestMsg) if ok { certRequested = true - hs.finishedHash.Write(certReq.marshal()) cri := certificateRequestInfoFromMsg(hs.ctx, c.vers, certReq) if chainToSend, err = c.getClientCertificate(cri); err != nil { @@ -655,7 +664,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -666,7 +675,6 @@ func (hs *clientHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(shd, msg) } - hs.finishedHash.Write(shd.marshal()) // If the server requested a certificate then we have to send a // Certificate message, even if it's empty because we don't have a @@ -674,8 +682,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { if certRequested { certMsg = new(certificateMsg) certMsg.certificates = chainToSend.Certificate - hs.finishedHash.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := 
hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil { return err } } @@ -686,8 +693,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } if ckx != nil { - hs.finishedHash.Write(ckx.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(ckx, &hs.finishedHash); err != nil { return err } } @@ -723,7 +729,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { } } - signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret) + signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash) signOpts := crypto.SignerOpts(sigHash) if sigType == signatureRSAPSS { signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash} @@ -734,8 +740,7 @@ func (hs *clientHandshakeState) doFullHandshake() error { return err } - hs.finishedHash.Write(certVerify.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certVerify, &hs.finishedHash); err != nil { return err } } @@ -870,7 +875,10 @@ func (hs *clientHandshakeState) readFinished(out []byte) error { return err } - msg, err := c.readHandshake() + // finishedMsg is included in the transcript, but not until after we + // check the client version, since the state before this message was + // sent is used during verification. 
+ msg, err := c.readHandshake(nil) if err != nil { return err } @@ -886,7 +894,11 @@ func (hs *clientHandshakeState) readFinished(out []byte) error { c.sendAlert(alertHandshakeFailure) return errors.New("tls: server's Finished message was incorrect") } - hs.finishedHash.Write(serverFinished.marshal()) + + if err := transcriptMsg(serverFinished, &hs.finishedHash); err != nil { + return err + } + copy(out, verify) return nil } @@ -897,7 +909,7 @@ func (hs *clientHandshakeState) readSessionTicket() error { } c := hs.c - msg, err := c.readHandshake() + msg, err := c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -906,7 +918,6 @@ func (hs *clientHandshakeState) readSessionTicket() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(sessionTicketMsg, msg) } - hs.finishedHash.Write(sessionTicketMsg.marshal()) hs.session = &clientSessionState{ sessionTicket: sessionTicketMsg.ticket, @@ -926,31 +937,40 @@ func (hs *clientHandshakeState) readSessionTicket() error { func (hs *clientHandshakeState) sendFinished(out []byte) error { c := hs.c - if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil { + if err := c.writeChangeCipherRecord(); err != nil { return err } finished := new(finishedMsg) finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil { return err } copy(out, finished.verifyData) return nil } +// maxRSAKeySize is the maximum RSA key size in bits that we are willing +// to verify the signatures of during a TLS handshake. +const maxRSAKeySize = 8192 + // verifyServerCertificate parses and verifies the provided chain, setting // c.verifiedChains and c.peerCertificates or sending the appropriate alert. 
func (c *Conn) verifyServerCertificate(certificates [][]byte) error { + activeHandles := make([]*activeCert, len(certificates)) certs := make([]*x509.Certificate, len(certificates)) for i, asn1Data := range certificates { - cert, err := x509.ParseCertificate(asn1Data) + cert, err := clientCertCache.newCert(asn1Data) if err != nil { c.sendAlert(alertBadCertificate) return errors.New("tls: failed to parse certificate from server: " + err.Error()) } - certs[i] = cert + if cert.cert.PublicKeyAlgorithm == x509.RSA && cert.cert.PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize { + c.sendAlert(alertBadCertificate) + return fmt.Errorf("tls: server sent certificate containing RSA key larger than %d bits", maxRSAKeySize) + } + activeHandles[i] = cert + certs[i] = cert.cert } if !c.config.InsecureSkipVerify { @@ -960,6 +980,7 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error { DNSName: c.config.ServerName, Intermediates: x509.NewCertPool(), } + for _, cert := range certs[1:] { opts.Intermediates.AddCert(cert) } @@ -967,7 +988,7 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error { c.verifiedChains, err = certs[0].Verify(opts) if err != nil { c.sendAlert(alertBadCertificate) - return err + return &CertificateVerificationError{UnverifiedCertificates: certs, Err: err} } } @@ -979,6 +1000,7 @@ func (c *Conn) verifyServerCertificate(certificates [][]byte) error { return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey) } + c.activeCertHandles = activeHandles c.peerCertificates = certs if c.config.VerifyPeerCertificate != nil { diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_client_tls13.go similarity index 88% rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go rename to vendor/github.com/quic-go/qtls-go1-20/handshake_client_tls13.go index 09d602d0..60ae2995 100644 
--- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_client_tls13.go +++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_client_tls13.go @@ -8,12 +8,12 @@ import ( "bytes" "context" "crypto" + "crypto/ecdh" "crypto/hmac" "crypto/rsa" "encoding/binary" "errors" "hash" - "sync/atomic" "time" "golang.org/x/crypto/cryptobyte" @@ -24,7 +24,7 @@ type clientHandshakeStateTLS13 struct { ctx context.Context serverHello *serverHelloMsg hello *clientHelloMsg - ecdheParams ecdheParameters + ecdheKey *ecdh.PrivateKey session *clientSessionState earlySecret []byte @@ -39,11 +39,15 @@ type clientHandshakeStateTLS13 struct { trafficSecret []byte // client_application_traffic_secret_0 } -// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and, +// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheKey, and, // optionally, hs.session, hs.earlySecret and hs.binderKey to be set. func (hs *clientHandshakeStateTLS13) handshake() error { c := hs.c + if needFIPS() { + return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode") + } + // The server must not select TLS 1.3 in a renegotiation. See RFC 8446, // sections 4.1.2 and 4.1.3. if c.handshakes > 0 { @@ -52,7 +56,7 @@ func (hs *clientHandshakeStateTLS13) handshake() error { } // Consistency check on the presence of a keyShare and its parameters. 
- if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 { + if hs.ecdheKey == nil || len(hs.hello.keyShares) != 1 { return c.sendAlert(alertInternalError) } @@ -61,7 +65,10 @@ func (hs *clientHandshakeStateTLS13) handshake() error { } hs.transcript = hs.suite.hash.New() - hs.transcript.Write(hs.hello.marshal()) + + if err := transcriptMsg(hs.hello, hs.transcript); err != nil { + return err + } if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) { if err := hs.sendDummyChangeCipherSpec(); err != nil { @@ -72,7 +79,9 @@ func (hs *clientHandshakeStateTLS13) handshake() error { } } - hs.transcript.Write(hs.serverHello.marshal()) + if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil { + return err + } c.buffering = true if err := hs.processServerHello(); err != nil { @@ -105,7 +114,7 @@ func (hs *clientHandshakeStateTLS13) handshake() error { return err } - atomic.StoreUint32(&c.handshakeStatus, 1) + c.isHandshakeComplete.Store(true) c.updateConnectionState() return nil } @@ -173,8 +182,7 @@ func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error { } hs.sentDummyCCS = true - _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - return err + return hs.c.writeChangeCipherRecord() } // processHelloRetryRequest handles the HRR in hs.serverHello, modifies and @@ -189,7 +197,9 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { hs.transcript.Reset() hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) hs.transcript.Write(chHash) - hs.transcript.Write(hs.serverHello.marshal()) + if err := transcriptMsg(hs.serverHello, hs.transcript); err != nil { + return err + } // The only HelloRetryRequest extensions we support are key_share and // cookie, and clients must abort the handshake if the HRR would not result @@ -223,21 +233,21 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { c.sendAlert(alertIllegalParameter) return errors.New("tls: server selected 
unsupported group") } - if hs.ecdheParams.CurveID() == curveID { + if sentID, _ := curveIDForCurve(hs.ecdheKey.Curve()); sentID == curveID { c.sendAlert(alertIllegalParameter) return errors.New("tls: server sent an unnecessary HelloRetryRequest key_share") } - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { + if _, ok := curveForCurveID(curveID); !ok { c.sendAlert(alertInternalError) return errors.New("tls: CurvePreferences includes unsupported curve") } - params, err := generateECDHEParameters(c.config.rand(), curveID) + key, err := generateECDHEKey(c.config.rand(), curveID) if err != nil { c.sendAlert(alertInternalError) return err } - hs.ecdheParams = params - hs.hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}} + hs.ecdheKey = key + hs.hello.keyShares = []keyShare{{group: curveID, data: key.PublicKey().Bytes()}} } hs.hello.raw = nil @@ -254,10 +264,18 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { transcript := hs.suite.hash.New() transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) transcript.Write(chHash) - transcript.Write(hs.serverHello.marshal()) - transcript.Write(hs.hello.marshalWithoutBinders()) + if err := transcriptMsg(hs.serverHello, transcript); err != nil { + return err + } + helloBytes, err := hs.hello.marshalWithoutBinders() + if err != nil { + return err + } + transcript.Write(helloBytes) pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)} - hs.hello.updateBinders(pskBinders) + if err := hs.hello.updateBinders(pskBinders); err != nil { + return err + } } else { // Server selected a cipher suite incompatible with the PSK. 
hs.hello.pskIdentities = nil @@ -269,13 +287,12 @@ func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { c.extraConfig.Rejected0RTT() } hs.hello.earlyData = false // disable 0-RTT - - hs.transcript.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil { return err } - msg, err := c.readHandshake() + // serverHelloMsg is not included in the transcript + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -316,7 +333,7 @@ func (hs *clientHandshakeStateTLS13) processServerHello() error { c.sendAlert(alertIllegalParameter) return errors.New("tls: server did not send a key share") } - if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() { + if sentID, _ := curveIDForCurve(hs.ecdheKey.Curve()); hs.serverHello.serverShare.group != sentID { c.sendAlert(alertIllegalParameter) return errors.New("tls: server selected unsupported group") } @@ -354,8 +371,13 @@ func (hs *clientHandshakeStateTLS13) processServerHello() error { func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { c := hs.c - sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data) - if sharedKey == nil { + peerKey, err := hs.ecdheKey.Curve().NewPublicKey(hs.serverHello.serverShare.data) + if err != nil { + c.sendAlert(alertIllegalParameter) + return errors.New("tls: invalid server key share") + } + sharedKey, err := hs.ecdheKey.ECDH(peerKey) + if err != nil { c.sendAlert(alertIllegalParameter) return errors.New("tls: invalid server key share") } @@ -364,6 +386,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { if !hs.usingPSK { earlySecret = hs.suite.extract(nil, nil) } + handshakeSecret := hs.suite.extract(sharedKey, hs.suite.deriveSecret(earlySecret, "derived", nil)) @@ -376,7 +399,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { 
c.in.exportKey(EncryptionHandshake, hs.suite, serverSecret) c.in.setTrafficSecret(hs.suite, serverSecret) - err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret) + err = c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret) if err != nil { c.sendAlert(alertInternalError) return err @@ -396,7 +419,7 @@ func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { func (hs *clientHandshakeStateTLS13) readServerParameters() error { c := hs.c - msg, err := c.readHandshake() + msg, err := c.readHandshake(hs.transcript) if err != nil { return err } @@ -414,7 +437,6 @@ func (hs *clientHandshakeStateTLS13) readServerParameters() error { if hs.c.extraConfig != nil && hs.c.extraConfig.ReceivedExtensions != nil { hs.c.extraConfig.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions) } - hs.transcript.Write(encryptedExtensions.marshal()) if err := checkALPN(hs.hello.alpnProtocols, encryptedExtensions.alpnProtocol); err != nil { c.sendAlert(alertUnsupportedExtension) @@ -450,18 +472,16 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { return nil } - msg, err := c.readHandshake() + msg, err := c.readHandshake(hs.transcript) if err != nil { return err } certReq, ok := msg.(*certificateRequestMsgTLS13) if ok { - hs.transcript.Write(certReq.marshal()) - hs.certReq = certReq - msg, err = c.readHandshake() + msg, err = c.readHandshake(hs.transcript) if err != nil { return err } @@ -476,7 +496,6 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { c.sendAlert(alertDecodeError) return errors.New("tls: received empty certificates message") } - hs.transcript.Write(certMsg.marshal()) c.scts = certMsg.certificate.SignedCertificateTimestamps c.ocspResponse = certMsg.certificate.OCSPStaple @@ -485,7 +504,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { return err } - msg, err = c.readHandshake() + // certificateVerifyMsg is 
included in the transcript, but not until + // after we verify the handshake signature, since the state before + // this message was sent is used. + msg, err = c.readHandshake(nil) if err != nil { return err } @@ -497,7 +519,7 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { } // See RFC 8446, Section 4.4.3. - if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) { + if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms()) { c.sendAlert(alertIllegalParameter) return errors.New("tls: certificate used with invalid signature algorithm") } @@ -516,7 +538,9 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { return errors.New("tls: invalid signature by the server certificate: " + err.Error()) } - hs.transcript.Write(certVerify.marshal()) + if err := transcriptMsg(certVerify, hs.transcript); err != nil { + return err + } return nil } @@ -524,7 +548,10 @@ func (hs *clientHandshakeStateTLS13) readServerCertificate() error { func (hs *clientHandshakeStateTLS13) readServerFinished() error { c := hs.c - msg, err := c.readHandshake() + // finishedMsg is included in the transcript, but not until after we + // check the client version, since the state before this message was + // sent is used during verification. + msg, err := c.readHandshake(nil) if err != nil { return err } @@ -541,7 +568,9 @@ func (hs *clientHandshakeStateTLS13) readServerFinished() error { return errors.New("tls: invalid server finished hash") } - hs.transcript.Write(finished.marshal()) + if err := transcriptMsg(finished, hs.transcript); err != nil { + return err + } // Derive secrets that take context through the server Finished. 
@@ -591,8 +620,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error { certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0 certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0 - hs.transcript.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil { return err } @@ -629,8 +657,7 @@ func (hs *clientHandshakeStateTLS13) sendClientCertificate() error { } certVerifyMsg.signature = sig - hs.transcript.Write(certVerifyMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil { return err } @@ -644,8 +671,7 @@ func (hs *clientHandshakeStateTLS13) sendClientFinished() error { verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript), } - hs.transcript.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil { return err } diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_messages.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_messages.go similarity index 75% rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_messages.go rename to vendor/github.com/quic-go/qtls-go1-20/handshake_messages.go index 5f87d4b8..c69fcefd 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_messages.go +++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_messages.go @@ -5,6 +5,7 @@ package qtls import ( + "errors" "fmt" "strings" @@ -95,9 +96,187 @@ type clientHelloMsg struct { additionalExtensions []Extension } -func (m *clientHelloMsg) marshal() []byte { +func (m *clientHelloMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil + } + + var exts 
cryptobyte.Builder + if len(m.serverName) > 0 { + // RFC 6066, Section 3 + exts.AddUint16(extensionServerName) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8(0) // name_type = host_name + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes([]byte(m.serverName)) + }) + }) + }) + } + if m.ocspStapling { + // RFC 4366, Section 3.6 + exts.AddUint16(extensionStatusRequest) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8(1) // status_type = ocsp + exts.AddUint16(0) // empty responder_id_list + exts.AddUint16(0) // empty request_extensions + }) + } + if len(m.supportedCurves) > 0 { + // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7 + exts.AddUint16(extensionSupportedCurves) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, curve := range m.supportedCurves { + exts.AddUint16(uint16(curve)) + } + }) + }) + } + if len(m.supportedPoints) > 0 { + // RFC 4492, Section 5.1.2 + exts.AddUint16(extensionSupportedPoints) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.supportedPoints) + }) + }) + } + if m.ticketSupported { + // RFC 5077, Section 3.2 + exts.AddUint16(extensionSessionTicket) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.sessionTicket) + }) + } + if len(m.supportedSignatureAlgorithms) > 0 { + // RFC 5246, Section 7.4.1.4.1 + exts.AddUint16(extensionSignatureAlgorithms) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, sigAlgo := range m.supportedSignatureAlgorithms { + exts.AddUint16(uint16(sigAlgo)) + } + }) + }) + } + if len(m.supportedSignatureAlgorithmsCert) > 0 { + // RFC 8446, Section 4.2.3 + 
exts.AddUint16(extensionSignatureAlgorithmsCert) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, sigAlgo := range m.supportedSignatureAlgorithmsCert { + exts.AddUint16(uint16(sigAlgo)) + } + }) + }) + } + if m.secureRenegotiationSupported { + // RFC 5746, Section 3.2 + exts.AddUint16(extensionRenegotiationInfo) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.secureRenegotiation) + }) + }) + } + if len(m.alpnProtocols) > 0 { + // RFC 7301, Section 3.1 + exts.AddUint16(extensionALPN) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, proto := range m.alpnProtocols { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes([]byte(proto)) + }) + } + }) + }) + } + if m.scts { + // RFC 6962, Section 3.3.1 + exts.AddUint16(extensionSCT) + exts.AddUint16(0) // empty extension_data + } + if len(m.supportedVersions) > 0 { + // RFC 8446, Section 4.2.1 + exts.AddUint16(extensionSupportedVersions) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, vers := range m.supportedVersions { + exts.AddUint16(vers) + } + }) + }) + } + if len(m.cookie) > 0 { + // RFC 8446, Section 4.2.2 + exts.AddUint16(extensionCookie) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.cookie) + }) + }) + } + if len(m.keyShares) > 0 { + // RFC 8446, Section 4.2.8 + exts.AddUint16(extensionKeyShare) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, ks := range m.keyShares { + exts.AddUint16(uint16(ks.group)) + exts.AddUint16LengthPrefixed(func(exts 
*cryptobyte.Builder) { + exts.AddBytes(ks.data) + }) + } + }) + }) + } + if m.earlyData { + // RFC 8446, Section 4.2.10 + exts.AddUint16(extensionEarlyData) + exts.AddUint16(0) // empty extension_data + } + if len(m.pskModes) > 0 { + // RFC 8446, Section 4.2.9 + exts.AddUint16(extensionPSKModes) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.pskModes) + }) + }) + } + for _, ext := range m.additionalExtensions { + exts.AddUint16(ext.Type) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(ext.Data) + }) + } + if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension + // RFC 8446, Section 4.2.11 + exts.AddUint16(extensionPreSharedKey) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, psk := range m.pskIdentities { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(psk.label) + }) + exts.AddUint32(psk.obfuscatedTicketAge) + } + }) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, binder := range m.pskBinders { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(binder) + }) + } + }) + }) + } + extBytes, err := exts.Bytes() + if err != nil { + return nil, err } var b cryptobyte.Builder @@ -117,225 +296,53 @@ func (m *clientHelloMsg) marshal() []byte { b.AddBytes(m.compressionMethods) }) - // If extensions aren't present, omit them. 
- var extensionsPresent bool - bWithoutExtensions := *b - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if len(m.serverName) > 0 { - // RFC 6066, Section 3 - b.AddUint16(extensionServerName) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(0) // name_type = host_name - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.serverName)) - }) - }) - }) - } - if m.ocspStapling { - // RFC 4366, Section 3.6 - b.AddUint16(extensionStatusRequest) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(1) // status_type = ocsp - b.AddUint16(0) // empty responder_id_list - b.AddUint16(0) // empty request_extensions - }) - } - if len(m.supportedCurves) > 0 { - // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7 - b.AddUint16(extensionSupportedCurves) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, curve := range m.supportedCurves { - b.AddUint16(uint16(curve)) - } - }) - }) - } - if len(m.supportedPoints) > 0 { - // RFC 4492, Section 5.1.2 - b.AddUint16(extensionSupportedPoints) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.supportedPoints) - }) - }) - } - if m.ticketSupported { - // RFC 5077, Section 3.2 - b.AddUint16(extensionSessionTicket) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.sessionTicket) - }) - } - if len(m.supportedSignatureAlgorithms) > 0 { - // RFC 5246, Section 7.4.1.4.1 - b.AddUint16(extensionSignatureAlgorithms) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithms { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if len(m.supportedSignatureAlgorithmsCert) > 0 { - // RFC 8446, Section 4.2.3 - 
b.AddUint16(extensionSignatureAlgorithmsCert) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithmsCert { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if m.secureRenegotiationSupported { - // RFC 5746, Section 3.2 - b.AddUint16(extensionRenegotiationInfo) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.secureRenegotiation) - }) - }) - } - if len(m.alpnProtocols) > 0 { - // RFC 7301, Section 3.1 - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, proto := range m.alpnProtocols { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(proto)) - }) - } - }) - }) - } - if m.scts { - // RFC 6962, Section 3.3.1 - b.AddUint16(extensionSCT) - b.AddUint16(0) // empty extension_data - } - if len(m.supportedVersions) > 0 { - // RFC 8446, Section 4.2.1 - b.AddUint16(extensionSupportedVersions) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - for _, vers := range m.supportedVersions { - b.AddUint16(vers) - } - }) - }) - } - if len(m.cookie) > 0 { - // RFC 8446, Section 4.2.2 - b.AddUint16(extensionCookie) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.cookie) - }) - }) - } - if len(m.keyShares) > 0 { - // RFC 8446, Section 4.2.8 - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, ks := range m.keyShares { - b.AddUint16(uint16(ks.group)) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ks.data) - }) - } - }) - }) - } - if m.earlyData { - // RFC 8446, Section 4.2.10 - 
b.AddUint16(extensionEarlyData) - b.AddUint16(0) // empty extension_data - } - if len(m.pskModes) > 0 { - // RFC 8446, Section 4.2.9 - b.AddUint16(extensionPSKModes) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.pskModes) - }) - }) - } - for _, ext := range m.additionalExtensions { - b.AddUint16(ext.Type) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ext.Data) - }) - } - if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension - // RFC 8446, Section 4.2.11 - b.AddUint16(extensionPreSharedKey) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, psk := range m.pskIdentities { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(psk.label) - }) - b.AddUint32(psk.obfuscatedTicketAge) - } - }) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, binder := range m.pskBinders { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(binder) - }) - } - }) - }) - } - - extensionsPresent = len(b.BytesOrPanic()) > 2 - }) - - if !extensionsPresent { - *b = bWithoutExtensions + if len(extBytes) > 0 { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(extBytes) + }) } }) - m.raw = b.BytesOrPanic() - return m.raw + m.raw, err = b.Bytes() + return m.raw, err } // marshalWithoutBinders returns the ClientHello through the // PreSharedKeyExtension.identities field, according to RFC 8446, Section // 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length. 
-func (m *clientHelloMsg) marshalWithoutBinders() []byte { +func (m *clientHelloMsg) marshalWithoutBinders() ([]byte, error) { bindersLen := 2 // uint16 length prefix for _, binder := range m.pskBinders { bindersLen += 1 // uint8 length prefix bindersLen += len(binder) } - fullMessage := m.marshal() - return fullMessage[:len(fullMessage)-bindersLen] + fullMessage, err := m.marshal() + if err != nil { + return nil, err + } + return fullMessage[:len(fullMessage)-bindersLen], nil } // updateBinders updates the m.pskBinders field, if necessary updating the // cached marshaled representation. The supplied binders must have the same // length as the current m.pskBinders. -func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) { +func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) error { if len(pskBinders) != len(m.pskBinders) { - panic("tls: internal error: pskBinders length mismatch") + return errors.New("tls: internal error: pskBinders length mismatch") } for i := range m.pskBinders { if len(pskBinders[i]) != len(m.pskBinders[i]) { - panic("tls: internal error: pskBinders length mismatch") + return errors.New("tls: internal error: pskBinders length mismatch") } } m.pskBinders = pskBinders if m.raw != nil { - lenWithoutBinders := len(m.marshalWithoutBinders()) + helloBytes, err := m.marshalWithoutBinders() + if err != nil { + return err + } + lenWithoutBinders := len(helloBytes) b := cryptobyte.NewFixedBuilder(m.raw[:lenWithoutBinders]) b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { for _, binder := range m.pskBinders { @@ -345,9 +352,11 @@ func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) { } }) if out, err := b.Bytes(); err != nil || len(out) != len(m.raw) { - panic("tls: internal error: failed to update binders") + return errors.New("tls: internal error: failed to update binders") } } + + return nil } func (m *clientHelloMsg) unmarshal(data []byte) bool { @@ -391,15 +400,21 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool { 
return false } + seenExts := make(map[uint16]bool) for !extensions.Empty() { - var ext uint16 + var extension uint16 var extData cryptobyte.String - if !extensions.ReadUint16(&ext) || + if !extensions.ReadUint16(&extension) || !extensions.ReadUint16LengthPrefixed(&extData) { return false } - switch ext { + if seenExts[extension] { + return false + } + seenExts[extension] = true + + switch extension { case extensionServerName: // RFC 6066, Section 3 var nameList cryptobyte.String @@ -583,7 +598,7 @@ func (m *clientHelloMsg) unmarshal(data []byte) bool { m.pskBinders = append(m.pskBinders, binder) } default: - m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData}) + m.additionalExtensions = append(m.additionalExtensions, Extension{Type: extension, Data: extData}) continue } @@ -619,9 +634,98 @@ type serverHelloMsg struct { selectedGroup CurveID } -func (m *serverHelloMsg) marshal() []byte { +func (m *serverHelloMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil + } + + var exts cryptobyte.Builder + if m.ocspStapling { + exts.AddUint16(extensionStatusRequest) + exts.AddUint16(0) // empty extension_data + } + if m.ticketSupported { + exts.AddUint16(extensionSessionTicket) + exts.AddUint16(0) // empty extension_data + } + if m.secureRenegotiationSupported { + exts.AddUint16(extensionRenegotiationInfo) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.secureRenegotiation) + }) + }) + } + if len(m.alpnProtocol) > 0 { + exts.AddUint16(extensionALPN) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes([]byte(m.alpnProtocol)) + }) + }) + }) + } + if len(m.scts) > 0 { + exts.AddUint16(extensionSCT) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) 
{ + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + for _, sct := range m.scts { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(sct) + }) + } + }) + }) + } + if m.supportedVersion != 0 { + exts.AddUint16(extensionSupportedVersions) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(m.supportedVersion) + }) + } + if m.serverShare.group != 0 { + exts.AddUint16(extensionKeyShare) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(uint16(m.serverShare.group)) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.serverShare.data) + }) + }) + } + if m.selectedIdentityPresent { + exts.AddUint16(extensionPreSharedKey) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(m.selectedIdentity) + }) + } + + if len(m.cookie) > 0 { + exts.AddUint16(extensionCookie) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.cookie) + }) + }) + } + if m.selectedGroup != 0 { + exts.AddUint16(extensionKeyShare) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint16(uint16(m.selectedGroup)) + }) + } + if len(m.supportedPoints) > 0 { + exts.AddUint16(extensionSupportedPoints) + exts.AddUint16LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddUint8LengthPrefixed(func(exts *cryptobyte.Builder) { + exts.AddBytes(m.supportedPoints) + }) + }) + } + + extBytes, err := exts.Bytes() + if err != nil { + return nil, err } var b cryptobyte.Builder @@ -635,104 +739,15 @@ func (m *serverHelloMsg) marshal() []byte { b.AddUint16(m.cipherSuite) b.AddUint8(m.compressionMethod) - // If extensions aren't present, omit them. 
- var extensionsPresent bool - bWithoutExtensions := *b - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if m.ocspStapling { - b.AddUint16(extensionStatusRequest) - b.AddUint16(0) // empty extension_data - } - if m.ticketSupported { - b.AddUint16(extensionSessionTicket) - b.AddUint16(0) // empty extension_data - } - if m.secureRenegotiationSupported { - b.AddUint16(extensionRenegotiationInfo) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.secureRenegotiation) - }) - }) - } - if len(m.alpnProtocol) > 0 { - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.alpnProtocol)) - }) - }) - }) - } - if len(m.scts) > 0 { - b.AddUint16(extensionSCT) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sct := range m.scts { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(sct) - }) - } - }) - }) - } - if m.supportedVersion != 0 { - b.AddUint16(extensionSupportedVersions) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.supportedVersion) - }) - } - if m.serverShare.group != 0 { - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(uint16(m.serverShare.group)) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.serverShare.data) - }) - }) - } - if m.selectedIdentityPresent { - b.AddUint16(extensionPreSharedKey) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.selectedIdentity) - }) - } - - if len(m.cookie) > 0 { - b.AddUint16(extensionCookie) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.cookie) - }) - }) - } - if 
m.selectedGroup != 0 { - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(uint16(m.selectedGroup)) - }) - } - if len(m.supportedPoints) > 0 { - b.AddUint16(extensionSupportedPoints) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.supportedPoints) - }) - }) - } - - extensionsPresent = len(b.BytesOrPanic()) > 2 - }) - - if !extensionsPresent { - *b = bWithoutExtensions + if len(extBytes) > 0 { + b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { + b.AddBytes(extBytes) + }) } }) - m.raw = b.BytesOrPanic() - return m.raw + m.raw, err = b.Bytes() + return m.raw, err } func (m *serverHelloMsg) unmarshal(data []byte) bool { @@ -757,6 +772,7 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool { return false } + seenExts := make(map[uint16]bool) for !extensions.Empty() { var extension uint16 var extData cryptobyte.String @@ -765,6 +781,11 @@ func (m *serverHelloMsg) unmarshal(data []byte) bool { return false } + if seenExts[extension] { + return false + } + seenExts[extension] = true + switch extension { case extensionStatusRequest: m.ocspStapling = true @@ -853,9 +874,9 @@ type encryptedExtensionsMsg struct { additionalExtensions []Extension } -func (m *encryptedExtensionsMsg) marshal() []byte { +func (m *encryptedExtensionsMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -886,8 +907,9 @@ func (m *encryptedExtensionsMsg) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool { @@ -937,10 +959,10 @@ func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool { type endOfEarlyDataMsg struct{} -func (m *endOfEarlyDataMsg) marshal() []byte { +func (m *endOfEarlyDataMsg) marshal() ([]byte, error) { x := make([]byte, 4) x[0] = 
typeEndOfEarlyData - return x + return x, nil } func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool { @@ -952,9 +974,9 @@ type keyUpdateMsg struct { updateRequested bool } -func (m *keyUpdateMsg) marshal() []byte { +func (m *keyUpdateMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -967,8 +989,9 @@ func (m *keyUpdateMsg) marshal() []byte { } }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *keyUpdateMsg) unmarshal(data []byte) bool { @@ -1000,9 +1023,9 @@ type newSessionTicketMsgTLS13 struct { maxEarlyData uint32 } -func (m *newSessionTicketMsgTLS13) marshal() []byte { +func (m *newSessionTicketMsgTLS13) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1027,8 +1050,9 @@ func (m *newSessionTicketMsgTLS13) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool { @@ -1081,9 +1105,9 @@ type certificateRequestMsgTLS13 struct { certificateAuthorities [][]byte } -func (m *certificateRequestMsgTLS13) marshal() []byte { +func (m *certificateRequestMsgTLS13) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1142,8 +1166,9 @@ func (m *certificateRequestMsgTLS13) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool { @@ -1227,9 +1252,9 @@ type certificateMsg struct { certificates [][]byte } -func (m *certificateMsg) marshal() (x []byte) { +func (m *certificateMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var i int @@ -1238,7 +1263,7 @@ func (m *certificateMsg) marshal() (x []byte) { } length := 3 + 
3*len(m.certificates) + i - x = make([]byte, 4+length) + x := make([]byte, 4+length) x[0] = typeCertificate x[1] = uint8(length >> 16) x[2] = uint8(length >> 8) @@ -1259,7 +1284,7 @@ func (m *certificateMsg) marshal() (x []byte) { } m.raw = x - return + return m.raw, nil } func (m *certificateMsg) unmarshal(data []byte) bool { @@ -1306,9 +1331,9 @@ type certificateMsgTLS13 struct { scts bool } -func (m *certificateMsgTLS13) marshal() []byte { +func (m *certificateMsgTLS13) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1326,8 +1351,9 @@ func (m *certificateMsgTLS13) marshal() []byte { marshalCertificate(b, certificate) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) { @@ -1450,9 +1476,9 @@ type serverKeyExchangeMsg struct { key []byte } -func (m *serverKeyExchangeMsg) marshal() []byte { +func (m *serverKeyExchangeMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } length := len(m.key) x := make([]byte, length+4) @@ -1463,7 +1489,7 @@ func (m *serverKeyExchangeMsg) marshal() []byte { copy(x[4:], m.key) m.raw = x - return x + return x, nil } func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { @@ -1480,9 +1506,9 @@ type certificateStatusMsg struct { response []byte } -func (m *certificateStatusMsg) marshal() []byte { +func (m *certificateStatusMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1494,8 +1520,9 @@ func (m *certificateStatusMsg) marshal() []byte { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *certificateStatusMsg) unmarshal(data []byte) bool { @@ -1514,10 +1541,10 @@ func (m *certificateStatusMsg) unmarshal(data []byte) bool { type serverHelloDoneMsg struct{} -func (m 
*serverHelloDoneMsg) marshal() []byte { +func (m *serverHelloDoneMsg) marshal() ([]byte, error) { x := make([]byte, 4) x[0] = typeServerHelloDone - return x + return x, nil } func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { @@ -1529,9 +1556,9 @@ type clientKeyExchangeMsg struct { ciphertext []byte } -func (m *clientKeyExchangeMsg) marshal() []byte { +func (m *clientKeyExchangeMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } length := len(m.ciphertext) x := make([]byte, length+4) @@ -1542,7 +1569,7 @@ func (m *clientKeyExchangeMsg) marshal() []byte { copy(x[4:], m.ciphertext) m.raw = x - return x + return x, nil } func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { @@ -1563,9 +1590,9 @@ type finishedMsg struct { verifyData []byte } -func (m *finishedMsg) marshal() []byte { +func (m *finishedMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1574,8 +1601,9 @@ func (m *finishedMsg) marshal() []byte { b.AddBytes(m.verifyData) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *finishedMsg) unmarshal(data []byte) bool { @@ -1597,9 +1625,9 @@ type certificateRequestMsg struct { certificateAuthorities [][]byte } -func (m *certificateRequestMsg) marshal() (x []byte) { +func (m *certificateRequestMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } // See RFC 4346, Section 7.4.4. 
@@ -1614,7 +1642,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) { length += 2 + 2*len(m.supportedSignatureAlgorithms) } - x = make([]byte, 4+length) + x := make([]byte, 4+length) x[0] = typeCertificateRequest x[1] = uint8(length >> 16) x[2] = uint8(length >> 8) @@ -1649,7 +1677,7 @@ func (m *certificateRequestMsg) marshal() (x []byte) { } m.raw = x - return + return m.raw, nil } func (m *certificateRequestMsg) unmarshal(data []byte) bool { @@ -1735,9 +1763,9 @@ type certificateVerifyMsg struct { signature []byte } -func (m *certificateVerifyMsg) marshal() (x []byte) { +func (m *certificateVerifyMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } var b cryptobyte.Builder @@ -1751,8 +1779,9 @@ func (m *certificateVerifyMsg) marshal() (x []byte) { }) }) - m.raw = b.BytesOrPanic() - return m.raw + var err error + m.raw, err = b.Bytes() + return m.raw, err } func (m *certificateVerifyMsg) unmarshal(data []byte) bool { @@ -1775,15 +1804,15 @@ type newSessionTicketMsg struct { ticket []byte } -func (m *newSessionTicketMsg) marshal() (x []byte) { +func (m *newSessionTicketMsg) marshal() ([]byte, error) { if m.raw != nil { - return m.raw + return m.raw, nil } // See RFC 5077, Section 3.3. 
ticketLen := len(m.ticket) length := 2 + 4 + ticketLen - x = make([]byte, 4+length) + x := make([]byte, 4+length) x[0] = typeNewSessionTicket x[1] = uint8(length >> 16) x[2] = uint8(length >> 8) @@ -1794,7 +1823,7 @@ func (m *newSessionTicketMsg) marshal() (x []byte) { m.raw = x - return + return m.raw, nil } func (m *newSessionTicketMsg) unmarshal(data []byte) bool { @@ -1822,10 +1851,25 @@ func (m *newSessionTicketMsg) unmarshal(data []byte) bool { type helloRequestMsg struct { } -func (*helloRequestMsg) marshal() []byte { - return []byte{typeHelloRequest, 0, 0, 0} +func (*helloRequestMsg) marshal() ([]byte, error) { + return []byte{typeHelloRequest, 0, 0, 0}, nil } func (*helloRequestMsg) unmarshal(data []byte) bool { return len(data) == 4 } + +type transcriptHash interface { + Write([]byte) (int, error) +} + +// transcriptMsg is a helper used to marshal and hash messages which typically +// are not written to the wire, and as such aren't hashed during Conn.writeRecord. +func transcriptMsg(msg handshakeMessage, h transcriptHash) error { + data, err := msg.marshal() + if err != nil { + return err + } + h.Write(data) + return nil +} diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_server.go similarity index 91% rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_server.go rename to vendor/github.com/quic-go/qtls-go1-20/handshake_server.go index a6519d7f..05321cfb 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server.go +++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_server.go @@ -16,7 +16,6 @@ import ( "fmt" "hash" "io" - "sync/atomic" "time" ) @@ -130,7 +129,7 @@ func (hs *serverHandshakeState) handshake() error { } c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random) - atomic.StoreUint32(&c.handshakeStatus, 1) + c.isHandshakeComplete.Store(true) c.updateConnectionState() return nil @@ -138,7 
+137,9 @@ func (hs *serverHandshakeState) handshake() error { // readClientHello reads a ClientHello message and selects the protocol version. func (c *Conn) readClientHello(ctx context.Context) (*clientHelloMsg, error) { - msg, err := c.readHandshake() + // clientHelloMsg is included in the transcript, but we haven't initialized + // it yet. The respective handshake functions will record it themselves. + msg, err := c.readHandshake(nil) if err != nil { return nil, err } @@ -494,9 +495,10 @@ func (hs *serverHandshakeState) doResumeHandshake() error { hs.hello.ticketSupported = hs.sessionState.usedOldKey hs.finishedHash = newFinishedHash(c.vers, hs.suite) hs.finishedHash.discardHandshakeBuffer() - hs.finishedHash.Write(hs.clientHello.marshal()) - hs.finishedHash.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil { + return err + } + if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil { return err } @@ -534,24 +536,23 @@ func (hs *serverHandshakeState) doFullHandshake() error { // certificates won't be used. 
hs.finishedHash.discardHandshakeBuffer() } - hs.finishedHash.Write(hs.clientHello.marshal()) - hs.finishedHash.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if err := transcriptMsg(hs.clientHello, &hs.finishedHash); err != nil { + return err + } + if _, err := hs.c.writeHandshakeRecord(hs.hello, &hs.finishedHash); err != nil { return err } certMsg := new(certificateMsg) certMsg.certificates = hs.cert.Certificate - hs.finishedHash.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, &hs.finishedHash); err != nil { return err } if hs.hello.ocspStapling { certStatus := new(certificateStatusMsg) certStatus.response = hs.cert.OCSPStaple - hs.finishedHash.Write(certStatus.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certStatus, &hs.finishedHash); err != nil { return err } } @@ -563,8 +564,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { return err } if skx != nil { - hs.finishedHash.Write(skx.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(skx, &hs.finishedHash); err != nil { return err } } @@ -579,7 +579,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { } if c.vers >= VersionTLS12 { certReq.hasSignatureAlgorithm = true - certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms + certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms() } // An empty list of certificateAuthorities signals to @@ -590,15 +590,13 @@ func (hs *serverHandshakeState) doFullHandshake() error { if c.config.ClientCAs != nil { certReq.certificateAuthorities = c.config.ClientCAs.Subjects() } - hs.finishedHash.Write(certReq.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != 
nil { + if _, err := hs.c.writeHandshakeRecord(certReq, &hs.finishedHash); err != nil { return err } } helloDone := new(serverHelloDoneMsg) - hs.finishedHash.Write(helloDone.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(helloDone, &hs.finishedHash); err != nil { return err } @@ -608,7 +606,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { var pub crypto.PublicKey // public key for client auth, if any - msg, err := c.readHandshake() + msg, err := c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -621,7 +619,6 @@ func (hs *serverHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(certMsg, msg) } - hs.finishedHash.Write(certMsg.marshal()) if err := c.processCertsFromClient(Certificate{ Certificate: certMsg.certificates, @@ -632,7 +629,7 @@ func (hs *serverHandshakeState) doFullHandshake() error { pub = c.peerCertificates[0].PublicKey } - msg, err = c.readHandshake() + msg, err = c.readHandshake(&hs.finishedHash) if err != nil { return err } @@ -650,7 +647,6 @@ func (hs *serverHandshakeState) doFullHandshake() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(ckx, msg) } - hs.finishedHash.Write(ckx.marshal()) preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers) if err != nil { @@ -670,7 +666,10 @@ func (hs *serverHandshakeState) doFullHandshake() error { // to the client's certificate. This allows us to verify that the client is in // possession of the private key of the certificate. if len(c.peerCertificates) > 0 { - msg, err = c.readHandshake() + // certificateVerifyMsg is included in the transcript, but not until + // after we verify the handshake signature, since the state before + // this message was sent is used. 
+ msg, err = c.readHandshake(nil) if err != nil { return err } @@ -699,13 +698,15 @@ func (hs *serverHandshakeState) doFullHandshake() error { } } - signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash, hs.masterSecret) + signed := hs.finishedHash.hashForClientCertificate(sigType, sigHash) if err := verifyHandshakeSignature(sigType, pub, sigHash, signed, certVerify.signature); err != nil { c.sendAlert(alertDecryptError) return errors.New("tls: invalid signature by the client certificate: " + err.Error()) } - hs.finishedHash.Write(certVerify.marshal()) + if err := transcriptMsg(certVerify, &hs.finishedHash); err != nil { + return err + } } hs.finishedHash.discardHandshakeBuffer() @@ -745,7 +746,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error { return err } - msg, err := c.readHandshake() + // finishedMsg is included in the transcript, but not until after we + // check the client version, since the state before this message was + // sent is used during verification. 
+ msg, err := c.readHandshake(nil) if err != nil { return err } @@ -762,7 +766,10 @@ func (hs *serverHandshakeState) readFinished(out []byte) error { return errors.New("tls: client's Finished message is incorrect") } - hs.finishedHash.Write(clientFinished.marshal()) + if err := transcriptMsg(clientFinished, &hs.finishedHash); err != nil { + return err + } + copy(out, verify) return nil } @@ -796,14 +803,16 @@ func (hs *serverHandshakeState) sendSessionTicket() error { masterSecret: hs.masterSecret, certificates: certsFromClient, } - var err error - m.ticket, err = c.encryptTicket(state.marshal()) + stateBytes, err := state.marshal() + if err != nil { + return err + } + m.ticket, err = c.encryptTicket(stateBytes) if err != nil { return err } - hs.finishedHash.Write(m.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(m, &hs.finishedHash); err != nil { return err } @@ -813,14 +822,13 @@ func (hs *serverHandshakeState) sendSessionTicket() error { func (hs *serverHandshakeState) sendFinished(out []byte) error { c := hs.c - if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil { + if err := c.writeChangeCipherRecord(); err != nil { return err } finished := new(finishedMsg) finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, &hs.finishedHash); err != nil { return err } @@ -841,6 +849,10 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error { c.sendAlert(alertBadCertificate) return errors.New("tls: failed to parse client certificate: " + err.Error()) } + if certs[i].PublicKeyAlgorithm == x509.RSA && certs[i].PublicKey.(*rsa.PublicKey).N.BitLen() > maxRSAKeySize { + c.sendAlert(alertBadCertificate) + return fmt.Errorf("tls: client sent certificate containing RSA key 
larger than %d bits", maxRSAKeySize) + } } if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) { @@ -863,7 +875,7 @@ func (c *Conn) processCertsFromClient(certificate Certificate) error { chains, err := certs[0].Verify(opts) if err != nil { c.sendAlert(alertBadCertificate) - return errors.New("tls: failed to verify client certificate: " + err.Error()) + return &CertificateVerificationError{UnverifiedCertificates: certs, Err: err} } c.verifiedChains = chains diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server_tls13.go b/vendor/github.com/quic-go/qtls-go1-20/handshake_server_tls13.go similarity index 90% rename from vendor/github.com/marten-seemann/qtls-go1-18/handshake_server_tls13.go rename to vendor/github.com/quic-go/qtls-go1-20/handshake_server_tls13.go index 7ce09c37..6189c780 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/handshake_server_tls13.go +++ b/vendor/github.com/quic-go/qtls-go1-20/handshake_server_tls13.go @@ -13,7 +13,6 @@ import ( "errors" "hash" "io" - "sync/atomic" "time" ) @@ -46,6 +45,10 @@ type serverHandshakeStateTLS13 struct { func (hs *serverHandshakeStateTLS13) handshake() error { c := hs.c + if needFIPS() { + return errors.New("tls: internal error: TLS 1.3 reached in FIPS mode") + } + // For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2. 
if err := hs.processClientHello(); err != nil { return err @@ -81,7 +84,7 @@ func (hs *serverHandshakeStateTLS13) handshake() error { return err } - atomic.StoreUint32(&c.handshakeStatus, 1) + c.isHandshakeComplete.Store(true) c.updateConnectionState() return nil } @@ -143,27 +146,14 @@ func (hs *serverHandshakeStateTLS13) processClientHello() error { hs.hello.sessionId = hs.clientHello.sessionId hs.hello.compressionMethod = compressionNone - if hs.suite == nil { - var preferenceList []uint16 - for _, suiteID := range c.config.CipherSuites { - for _, suite := range cipherSuitesTLS13 { - if suite.id == suiteID { - preferenceList = append(preferenceList, suiteID) - break - } - } - } - if len(preferenceList) == 0 { - preferenceList = defaultCipherSuitesTLS13 - if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) { - preferenceList = defaultCipherSuitesTLS13NoAES - } - } - for _, suiteID := range preferenceList { - hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID) - if hs.suite != nil { - break - } + preferenceList := defaultCipherSuitesTLS13 + if !hasAESGCMHardwareSupport || !aesgcmPreferred(hs.clientHello.cipherSuites) { + preferenceList = defaultCipherSuitesTLS13NoAES + } + for _, suiteID := range preferenceList { + hs.suite = mutualCipherSuiteTLS13(hs.clientHello.cipherSuites, suiteID) + if hs.suite != nil { + break } } if hs.suite == nil { @@ -208,18 +198,23 @@ GroupSelection: clientKeyShare = &hs.clientHello.keyShares[0] } - if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok { + if _, ok := curveForCurveID(selectedGroup); !ok { c.sendAlert(alertInternalError) return errors.New("tls: CurvePreferences includes unsupported curve") } - params, err := generateECDHEParameters(c.config.rand(), selectedGroup) + key, err := generateECDHEKey(c.config.rand(), selectedGroup) if err != nil { c.sendAlert(alertInternalError) return err } - hs.hello.serverShare = keyShare{group: selectedGroup, data: 
params.PublicKey()} - hs.sharedKey = params.SharedKey(clientKeyShare.data) - if hs.sharedKey == nil { + hs.hello.serverShare = keyShare{group: selectedGroup, data: key.PublicKey().Bytes()} + peerKey, err := key.Curve().NewPublicKey(clientKeyShare.data) + if err != nil { + c.sendAlert(alertIllegalParameter) + return errors.New("tls: invalid client key share") + } + hs.sharedKey, err = key.ECDH(peerKey) + if err != nil { c.sendAlert(alertIllegalParameter) return errors.New("tls: invalid client key share") } @@ -330,7 +325,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error { c.sendAlert(alertInternalError) return errors.New("tls: internal error: failed to clone hash") } - transcript.Write(hs.clientHello.marshalWithoutBinders()) + clientHelloBytes, err := hs.clientHello.marshalWithoutBinders() + if err != nil { + c.sendAlert(alertInternalError) + return err + } + transcript.Write(clientHelloBytes) pskBinder := hs.suite.finishedHash(binderKey, transcript) if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) { c.sendAlert(alertDecryptError) @@ -343,7 +343,12 @@ func (hs *serverHandshakeStateTLS13) checkForResumption() error { } h := cloneHash(hs.transcript, hs.suite.hash) - h.Write(hs.clientHello.marshal()) + clientHelloWithBindersBytes, err := hs.clientHello.marshal() + if err != nil { + c.sendAlert(alertInternalError) + return err + } + h.Write(clientHelloWithBindersBytes) if hs.encryptedExtensions.earlyData { clientEarlySecret := hs.suite.deriveSecret(hs.earlySecret, "c e traffic", h) c.in.exportKey(Encryption0RTT, hs.suite, clientEarlySecret) @@ -432,8 +437,7 @@ func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error { } hs.sentDummyCCS = true - _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - return err + return hs.c.writeChangeCipherRecord() } func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error { @@ -441,7 +445,9 @@ func (hs *serverHandshakeStateTLS13) 
doHelloRetryRequest(selectedGroup CurveID) // The first ClientHello gets double-hashed into the transcript upon a // HelloRetryRequest. See RFC 8446, Section 4.4.1. - hs.transcript.Write(hs.clientHello.marshal()) + if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil { + return err + } chHash := hs.transcript.Sum(nil) hs.transcript.Reset() hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) @@ -457,8 +463,7 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) selectedGroup: selectedGroup, } - hs.transcript.Write(helloRetryRequest.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(helloRetryRequest, hs.transcript); err != nil { return err } @@ -466,7 +471,8 @@ func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) return err } - msg, err := c.readHandshake() + // clientHelloMsg is not included in the transcript. 
+ msg, err := c.readHandshake(nil) if err != nil { return err } @@ -562,9 +568,10 @@ func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool { func (hs *serverHandshakeStateTLS13) sendServerParameters() error { c := hs.c - hs.transcript.Write(hs.clientHello.marshal()) - hs.transcript.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { + if err := transcriptMsg(hs.clientHello, hs.transcript); err != nil { + return err + } + if _, err := hs.c.writeHandshakeRecord(hs.hello, hs.transcript); err != nil { return err } @@ -607,8 +614,7 @@ func (hs *serverHandshakeStateTLS13) sendServerParameters() error { hs.encryptedExtensions.additionalExtensions = hs.c.extraConfig.GetExtensions(typeEncryptedExtensions) } - hs.transcript.Write(hs.encryptedExtensions.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.encryptedExtensions.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(hs.encryptedExtensions, hs.transcript); err != nil { return err } @@ -632,13 +638,12 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { certReq := new(certificateRequestMsgTLS13) certReq.ocspStapling = true certReq.scts = true - certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms + certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms() if c.config.ClientCAs != nil { certReq.certificateAuthorities = c.config.ClientCAs.Subjects() } - hs.transcript.Write(certReq.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certReq, hs.transcript); err != nil { return err } } @@ -649,8 +654,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0 certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 - hs.transcript.Write(certMsg.marshal()) - if _, err := 
c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certMsg, hs.transcript); err != nil { return err } @@ -681,8 +685,7 @@ func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { } certVerifyMsg.signature = sig - hs.transcript.Write(certVerifyMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(certVerifyMsg, hs.transcript); err != nil { return err } @@ -696,8 +699,7 @@ func (hs *serverHandshakeStateTLS13) sendServerFinished() error { verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript), } - hs.transcript.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { + if _, err := hs.c.writeHandshakeRecord(finished, hs.transcript); err != nil { return err } @@ -759,7 +761,9 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error { finishedMsg := &finishedMsg{ verifyData: hs.clientFinished, } - hs.transcript.Write(finishedMsg.marshal()) + if err := transcriptMsg(finishedMsg, hs.transcript); err != nil { + return err + } if !hs.shouldSendSessionTickets() { return nil @@ -780,7 +784,7 @@ func (hs *serverHandshakeStateTLS13) sendSessionTickets() error { return err } - if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil { + if _, err := c.writeHandshakeRecord(m, nil); err != nil { return err } @@ -805,7 +809,7 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { // If we requested a client certificate, then the client must send a // certificate message. If it's empty, no CertificateVerify is sent. 
- msg, err := c.readHandshake() + msg, err := c.readHandshake(hs.transcript) if err != nil { return err } @@ -815,7 +819,6 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { c.sendAlert(alertUnexpectedMessage) return unexpectedMessageError(certMsg, msg) } - hs.transcript.Write(certMsg.marshal()) if err := c.processCertsFromClient(certMsg.certificate); err != nil { return err @@ -829,7 +832,10 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { } if len(certMsg.certificate.Certificate) != 0 { - msg, err = c.readHandshake() + // certificateVerifyMsg is included in the transcript, but not until + // after we verify the handshake signature, since the state before + // this message was sent is used. + msg, err = c.readHandshake(nil) if err != nil { return err } @@ -841,7 +847,7 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { } // See RFC 8446, Section 4.4.3. - if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) { + if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms()) { c.sendAlert(alertIllegalParameter) return errors.New("tls: client certificate used with invalid signature algorithm") } @@ -860,7 +866,9 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { return errors.New("tls: invalid signature by the client certificate: " + err.Error()) } - hs.transcript.Write(certVerify.marshal()) + if err := transcriptMsg(certVerify, hs.transcript); err != nil { + return err + } } // If we waited until the client certificates to send session tickets, we @@ -875,7 +883,8 @@ func (hs *serverHandshakeStateTLS13) readClientCertificate() error { func (hs *serverHandshakeStateTLS13) readClientFinished() error { c := hs.c - msg, err := c.readHandshake() + // finishedMsg is not included in the transcript. 
+ msg, err := c.readHandshake(nil) if err != nil { return err } diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go b/vendor/github.com/quic-go/qtls-go1-20/key_agreement.go similarity index 94% rename from vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go rename to vendor/github.com/quic-go/qtls-go1-20/key_agreement.go index 453a8dcf..f926869a 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/key_agreement.go +++ b/vendor/github.com/quic-go/qtls-go1-20/key_agreement.go @@ -6,6 +6,7 @@ package qtls import ( "crypto" + "crypto/ecdh" "crypto/md5" "crypto/rsa" "crypto/sha1" @@ -157,7 +158,7 @@ func hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint1 type ecdheKeyAgreement struct { version uint16 isRSA bool - params ecdheParameters + key *ecdh.PrivateKey // ckx and preMasterSecret are generated in processServerKeyExchange // and returned in generateClientKeyExchange. @@ -177,18 +178,18 @@ func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *config, cert *Cer if curveID == 0 { return nil, errors.New("tls: no supported elliptic curves offered") } - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { + if _, ok := curveForCurveID(curveID); !ok { return nil, errors.New("tls: CurvePreferences includes unsupported curve") } - params, err := generateECDHEParameters(config.rand(), curveID) + key, err := generateECDHEKey(config.rand(), curveID) if err != nil { return nil, err } - ka.params = params + ka.key = key // See RFC 4492, Section 5.4. 
- ecdhePublic := params.PublicKey() + ecdhePublic := key.PublicKey().Bytes() serverECDHEParams := make([]byte, 1+2+1+len(ecdhePublic)) serverECDHEParams[0] = 3 // named curve serverECDHEParams[1] = byte(curveID >> 8) @@ -259,8 +260,12 @@ func (ka *ecdheKeyAgreement) processClientKeyExchange(config *config, cert *Cert return nil, errClientKeyExchange } - preMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:]) - if preMasterSecret == nil { + peerKey, err := ka.key.Curve().NewPublicKey(ckx.ciphertext[1:]) + if err != nil { + return nil, errClientKeyExchange + } + preMasterSecret, err := ka.key.ECDH(peerKey) + if err != nil { return nil, errClientKeyExchange } @@ -288,22 +293,26 @@ func (ka *ecdheKeyAgreement) processServerKeyExchange(config *config, clientHell return errServerKeyExchange } - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { + if _, ok := curveForCurveID(curveID); !ok { return errors.New("tls: server selected unsupported curve") } - params, err := generateECDHEParameters(config.rand(), curveID) + key, err := generateECDHEKey(config.rand(), curveID) if err != nil { return err } - ka.params = params + ka.key = key - ka.preMasterSecret = params.SharedKey(publicKey) - if ka.preMasterSecret == nil { + peerKey, err := key.Curve().NewPublicKey(publicKey) + if err != nil { + return errServerKeyExchange + } + ka.preMasterSecret, err = key.ECDH(peerKey) + if err != nil { return errServerKeyExchange } - ourPublicKey := params.PublicKey() + ourPublicKey := key.PublicKey().Bytes() ka.ckx = new(clientKeyExchangeMsg) ka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey)) ka.ckx.ciphertext[0] = byte(len(ourPublicKey)) diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go b/vendor/github.com/quic-go/qtls-go1-20/key_schedule.go similarity index 63% rename from vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go rename to vendor/github.com/quic-go/qtls-go1-20/key_schedule.go index da13904a..c410a3e8 100644 --- 
a/vendor/github.com/marten-seemann/qtls-go1-18/key_schedule.go +++ b/vendor/github.com/quic-go/qtls-go1-20/key_schedule.go @@ -5,15 +5,14 @@ package qtls import ( - "crypto/elliptic" + "crypto/ecdh" "crypto/hmac" "errors" + "fmt" "hash" "io" - "math/big" "golang.org/x/crypto/cryptobyte" - "golang.org/x/crypto/curve25519" "golang.org/x/crypto/hkdf" ) @@ -42,8 +41,24 @@ func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []by hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { b.AddBytes(context) }) + hkdfLabelBytes, err := hkdfLabel.Bytes() + if err != nil { + // Rather than calling BytesOrPanic, we explicitly handle this error, in + // order to provide a reasonable error message. It should be basically + // impossible for this to panic, and routing errors back through the + // tree rooted in this function is quite painful. The labels are fixed + // size, and the context is either a fixed-length computed hash, or + // parsed from a field which has the same length limitation. As such, an + // error here is likely to only be caused during development. + // + // NOTE: another reasonable approach here might be to return a + // randomized slice if we encounter an error, which would break the + // connection, but avoid panicking. This would perhaps be safer but + // significantly more confusing to users. + panic(fmt.Errorf("failed to construct HKDF label: %s", err)) + } out := make([]byte, length) - n, err := hkdf.Expand(c.hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out) + n, err := hkdf.Expand(c.hash.New, secret, hkdfLabelBytes).Read(out) if err != nil || n != length { panic("tls: HKDF-Expand-Label invocation failed unexpectedly") } @@ -101,99 +116,43 @@ func (c *cipherSuiteTLS13) exportKeyingMaterial(masterSecret []byte, transcript } } -// ecdheParameters implements Diffie-Hellman with either NIST curves or X25519, +// generateECDHEKey returns a PrivateKey that implements Diffie-Hellman // according to RFC 8446, Section 4.2.8.2. 
-type ecdheParameters interface { - CurveID() CurveID - PublicKey() []byte - SharedKey(peerPublicKey []byte) []byte -} - -func generateECDHEParameters(rand io.Reader, curveID CurveID) (ecdheParameters, error) { - if curveID == X25519 { - privateKey := make([]byte, curve25519.ScalarSize) - if _, err := io.ReadFull(rand, privateKey); err != nil { - return nil, err - } - publicKey, err := curve25519.X25519(privateKey, curve25519.Basepoint) - if err != nil { - return nil, err - } - return &x25519Parameters{privateKey: privateKey, publicKey: publicKey}, nil - } - +func generateECDHEKey(rand io.Reader, curveID CurveID) (*ecdh.PrivateKey, error) { curve, ok := curveForCurveID(curveID) if !ok { return nil, errors.New("tls: internal error: unsupported curve") } - p := &nistParameters{curveID: curveID} - var err error - p.privateKey, p.x, p.y, err = elliptic.GenerateKey(curve, rand) - if err != nil { - return nil, err - } - return p, nil + return curve.GenerateKey(rand) } -func curveForCurveID(id CurveID) (elliptic.Curve, bool) { +func curveForCurveID(id CurveID) (ecdh.Curve, bool) { switch id { + case X25519: + return ecdh.X25519(), true case CurveP256: - return elliptic.P256(), true + return ecdh.P256(), true case CurveP384: - return elliptic.P384(), true + return ecdh.P384(), true case CurveP521: - return elliptic.P521(), true + return ecdh.P521(), true default: return nil, false } } -type nistParameters struct { - privateKey []byte - x, y *big.Int // public key - curveID CurveID -} - -func (p *nistParameters) CurveID() CurveID { - return p.curveID -} - -func (p *nistParameters) PublicKey() []byte { - curve, _ := curveForCurveID(p.curveID) - return elliptic.Marshal(curve, p.x, p.y) -} - -func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte { - curve, _ := curveForCurveID(p.curveID) - // Unmarshal also checks whether the given point is on the curve. 
- x, y := elliptic.Unmarshal(curve, peerPublicKey) - if x == nil { - return nil - } - - xShared, _ := curve.ScalarMult(x, y, p.privateKey) - sharedKey := make([]byte, (curve.Params().BitSize+7)/8) - return xShared.FillBytes(sharedKey) -} - -type x25519Parameters struct { - privateKey []byte - publicKey []byte -} - -func (p *x25519Parameters) CurveID() CurveID { - return X25519 -} - -func (p *x25519Parameters) PublicKey() []byte { - return p.publicKey[:] -} - -func (p *x25519Parameters) SharedKey(peerPublicKey []byte) []byte { - sharedKey, err := curve25519.X25519(p.privateKey, peerPublicKey) - if err != nil { - return nil +func curveIDForCurve(curve ecdh.Curve) (CurveID, bool) { + switch curve { + case ecdh.X25519(): + return X25519, true + case ecdh.P256(): + return CurveP256, true + case ecdh.P384(): + return CurveP384, true + case ecdh.P521(): + return CurveP521, true + default: + return 0, false } - return sharedKey } diff --git a/vendor/github.com/quic-go/qtls-go1-20/notboring.go b/vendor/github.com/quic-go/qtls-go1-20/notboring.go new file mode 100644 index 00000000..f292e4f0 --- /dev/null +++ b/vendor/github.com/quic-go/qtls-go1-20/notboring.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package qtls + +func needFIPS() bool { return false } + +func supportedSignatureAlgorithms() []SignatureScheme { + return defaultSupportedSignatureAlgorithms +} + +func fipsMinVersion(c *config) uint16 { panic("fipsMinVersion") } +func fipsMaxVersion(c *config) uint16 { panic("fipsMaxVersion") } +func fipsCurvePreferences(c *config) []CurveID { panic("fipsCurvePreferences") } +func fipsCipherSuites(c *config) []uint16 { panic("fipsCipherSuites") } + +var fipsSupportedSignatureAlgorithms []SignatureScheme diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/prf.go b/vendor/github.com/quic-go/qtls-go1-20/prf.go similarity index 99% rename from vendor/github.com/marten-seemann/qtls-go1-19/prf.go rename to vendor/github.com/quic-go/qtls-go1-20/prf.go index 9eb0221a..14712891 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-19/prf.go +++ b/vendor/github.com/quic-go/qtls-go1-20/prf.go @@ -215,7 +215,7 @@ func (h finishedHash) serverSum(masterSecret []byte) []byte { // hashForClientCertificate returns the handshake messages so far, pre-hashed if // necessary, suitable for signing by a TLS client certificate. 
-func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash, masterSecret []byte) []byte { +func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash) []byte { if (h.version >= VersionTLS12 || sigType == signatureEd25519) && h.buffer == nil { panic("tls: handshake hash for a client certificate requested after discarding the handshake buffer") } diff --git a/vendor/github.com/marten-seemann/qtls-go1-18/ticket.go b/vendor/github.com/quic-go/qtls-go1-20/ticket.go similarity index 95% rename from vendor/github.com/marten-seemann/qtls-go1-18/ticket.go rename to vendor/github.com/quic-go/qtls-go1-20/ticket.go index 81e8a52e..1b9289c2 100644 --- a/vendor/github.com/marten-seemann/qtls-go1-18/ticket.go +++ b/vendor/github.com/quic-go/qtls-go1-20/ticket.go @@ -34,7 +34,7 @@ type sessionState struct { usedOldKey bool } -func (m *sessionState) marshal() []byte { +func (m *sessionState) marshal() ([]byte, error) { var b cryptobyte.Builder b.AddUint16(m.vers) b.AddUint16(m.cipherSuite) @@ -49,7 +49,7 @@ func (m *sessionState) marshal() []byte { }) } }) - return b.BytesOrPanic() + return b.Bytes() } func (m *sessionState) unmarshal(data []byte) bool { @@ -94,7 +94,7 @@ type sessionStateTLS13 struct { appData []byte } -func (m *sessionStateTLS13) marshal() []byte { +func (m *sessionStateTLS13) marshal() ([]byte, error) { var b cryptobyte.Builder b.AddUint16(VersionTLS13) b.AddUint8(2) // revision @@ -111,7 +111,7 @@ func (m *sessionStateTLS13) marshal() []byte { b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { b.AddBytes(m.appData) }) - return b.BytesOrPanic() + return b.Bytes() } func (m *sessionStateTLS13) unmarshal(data []byte) bool { @@ -227,8 +227,11 @@ func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, e if c.extraConfig != nil { state.maxEarlyData = c.extraConfig.MaxEarlyData } - var err error - m.label, err = c.encryptTicket(state.marshal()) + stateBytes, err := state.marshal() + if err != 
nil { + return nil, err + } + m.label, err = c.encryptTicket(stateBytes) if err != nil { return nil, err } @@ -259,7 +262,7 @@ func (c *Conn) getSessionTicketMsg(appData []byte) (*newSessionTicketMsgTLS13, e // The ticket may be nil if config.SessionTicketsDisabled is set, // or if the client isn't able to receive session tickets. func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) { - if c.isClient || !c.handshakeComplete() || c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil { + if c.isClient || !c.isHandshakeComplete.Load() || c.extraConfig == nil || c.extraConfig.AlternativeRecordLayer == nil { return nil, errors.New("GetSessionTicket is only valid for servers after completion of the handshake, and if an alternative record layer is set.") } if c.config.SessionTicketsDisabled { @@ -270,5 +273,5 @@ func (c *Conn) GetSessionTicket(appData []byte) ([]byte, error) { if err != nil { return nil, err } - return m.marshal(), nil + return m.marshal() } diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/tls.go b/vendor/github.com/quic-go/qtls-go1-20/tls.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/tls.go rename to vendor/github.com/quic-go/qtls-go1-20/tls.go diff --git a/vendor/github.com/marten-seemann/qtls-go1-19/unsafe.go b/vendor/github.com/quic-go/qtls-go1-20/unsafe.go similarity index 100% rename from vendor/github.com/marten-seemann/qtls-go1-19/unsafe.go rename to vendor/github.com/quic-go/qtls-go1-20/unsafe.go diff --git a/vendor/github.com/lucas-clemente/quic-go/.gitignore b/vendor/github.com/quic-go/quic-go/.gitignore similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/.gitignore rename to vendor/github.com/quic-go/quic-go/.gitignore diff --git a/vendor/github.com/lucas-clemente/quic-go/.golangci.yml b/vendor/github.com/quic-go/quic-go/.golangci.yml similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/.golangci.yml rename to 
vendor/github.com/quic-go/quic-go/.golangci.yml diff --git a/vendor/github.com/lucas-clemente/quic-go/Changelog.md b/vendor/github.com/quic-go/quic-go/Changelog.md similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/Changelog.md rename to vendor/github.com/quic-go/quic-go/Changelog.md index c1c33232..82df5fb2 100644 --- a/vendor/github.com/lucas-clemente/quic-go/Changelog.md +++ b/vendor/github.com/quic-go/quic-go/Changelog.md @@ -101,8 +101,8 @@ - Add a `quic.Config` option to configure keep-alive - Rename the STK to Cookie - Implement `net.Conn`-style deadlines for streams -- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details. -- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details. +- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details. +- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details. - Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper` - Changed `h2quic.Server.Serve()` to accept a `net.PacketConn` - Drop support for Go 1.7 and 1.8. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/LICENSE b/vendor/github.com/quic-go/quic-go/LICENSE similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/LICENSE rename to vendor/github.com/quic-go/quic-go/LICENSE diff --git a/vendor/github.com/quic-go/quic-go/README.md b/vendor/github.com/quic-go/quic-go/README.md new file mode 100644 index 00000000..977bb928 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/README.md @@ -0,0 +1,63 @@ +# A QUIC implementation in pure Go + + + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/quic-go)](https://pkg.go.dev/github.com/quic-go/quic-go) +[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/quic-go/) + +quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) and Datagram Packetization Layer Path MTU + Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)). + +In addition to the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem. + +## Guides + +*We currently support Go 1.19.x and Go 1.20.x* + +Running tests: + + go test ./... + +### QUIC without HTTP/3 + +Take a look at [this echo example](example/echo/echo.go). + +## Usage + +### As a server + +See the [example server](example/main.go). 
Starting a QUIC server is very similar to the standard lib http in go: + +```go +http.Handle("/", http.FileServer(http.Dir(wwwDir))) +http3.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil) +``` + +### As a client + +See the [example client](example/client/main.go). Use a `http3.RoundTripper` as a `Transport` in a `http.Client`. + +```go +http.Client{ + Transport: &http3.RoundTripper{}, +} +``` + +## Projects using quic-go + +| Project | Description | Stars | +|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-------| +| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server. | ![GitHub Repo stars](https://img.shields.io/github/stars/AdguardTeam/AdGuardHome?style=flat-square) | +| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) | +| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) | +| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) | +| [go-libp2p](https://github.com/libp2p/go-libp2p) | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others | ![GitHub Repo stars](https://img.shields.io/github/stars/libp2p/go-libp2p?style=flat-square) | +| [OONI Probe](https://github.com/ooni/probe-cli) 
| Next generation OONI Probe. Library and CLI tool. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) | +| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) | +| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) | +| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) | +| [YoMo](https://github.com/yomorun/yomo) | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) | + +## Contributing + +We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go b/vendor/github.com/quic-go/quic-go/buffer_pool.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/buffer_pool.go rename to vendor/github.com/quic-go/quic-go/buffer_pool.go index c0b7067d..f6745b08 100644 --- a/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go +++ b/vendor/github.com/quic-go/quic-go/buffer_pool.go @@ -3,7 +3,7 @@ package quic import ( "sync" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) type packetBuffer struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/client.go b/vendor/github.com/quic-go/quic-go/client.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/client.go rename to vendor/github.com/quic-go/quic-go/client.go index a93f7536..b05f0ab2 100644 --- a/vendor/github.com/lucas-clemente/quic-go/client.go +++ b/vendor/github.com/quic-go/quic-go/client.go @@ -7,9 +7,9 @@ import ( "fmt" "net" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" ) type client struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/closed_conn.go b/vendor/github.com/quic-go/quic-go/closed_conn.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/closed_conn.go rename to vendor/github.com/quic-go/quic-go/closed_conn.go index d46e393a..73904b84 100644 --- a/vendor/github.com/lucas-clemente/quic-go/closed_conn.go +++ b/vendor/github.com/quic-go/quic-go/closed_conn.go @@ -4,8 +4,8 @@ import ( "math/bits" "net" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) 
// A closedLocalConn is a connection that we closed locally. diff --git a/vendor/github.com/lucas-clemente/quic-go/codecov.yml b/vendor/github.com/quic-go/quic-go/codecov.yml similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/codecov.yml rename to vendor/github.com/quic-go/quic-go/codecov.yml diff --git a/vendor/github.com/lucas-clemente/quic-go/config.go b/vendor/github.com/quic-go/quic-go/config.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/config.go rename to vendor/github.com/quic-go/quic-go/config.go index 0e8cc98a..3ead9b7a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/config.go +++ b/vendor/github.com/quic-go/quic-go/config.go @@ -5,8 +5,8 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) // Clone clones a Config @@ -135,6 +135,7 @@ func populateConfig(config *Config, defaultConnIDLen int) *Config { EnableDatagrams: config.EnableDatagrams, DisablePathMTUDiscovery: config.DisablePathMTUDiscovery, DisableVersionNegotiationPackets: config.DisableVersionNegotiationPackets, + Allow0RTT: config.Allow0RTT, Tracer: config.Tracer, } } diff --git a/vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go b/vendor/github.com/quic-go/quic-go/conn_id_generator.go similarity index 93% rename from vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go rename to vendor/github.com/quic-go/quic-go/conn_id_generator.go index c56e8a4c..2d28dc61 100644 --- a/vendor/github.com/lucas-clemente/quic-go/conn_id_generator.go +++ b/vendor/github.com/quic-go/quic-go/conn_id_generator.go @@ -3,10 +3,10 @@ package quic import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - 
"github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type connIDGenerator struct { @@ -22,8 +22,6 @@ type connIDGenerator struct { retireConnectionID func(protocol.ConnectionID) replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte) queueControlFrame func(wire.Frame) - - version protocol.VersionNumber } func newConnIDGenerator( @@ -36,7 +34,6 @@ func newConnIDGenerator( replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte), queueControlFrame func(wire.Frame), generator ConnectionIDGenerator, - version protocol.VersionNumber, ) *connIDGenerator { m := &connIDGenerator{ generator: generator, @@ -47,7 +44,6 @@ func newConnIDGenerator( retireConnectionID: retireConnectionID, replaceWithClosed: replaceWithClosed, queueControlFrame: queueControlFrame, - version: version, } m.activeSrcConnIDs[0] = initialConnectionID m.initialClientDestConnID = initialClientDestConnID diff --git a/vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go b/vendor/github.com/quic-go/quic-go/conn_id_manager.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go rename to vendor/github.com/quic-go/quic-go/conn_id_manager.go index b878b027..ba65aec0 100644 --- a/vendor/github.com/lucas-clemente/quic-go/conn_id_manager.go +++ b/vendor/github.com/quic-go/quic-go/conn_id_manager.go @@ -3,11 +3,11 @@ package quic import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + 
"github.com/quic-go/quic-go/internal/utils" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" + "github.com/quic-go/quic-go/internal/wire" ) type newConnID struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/connection.go b/vendor/github.com/quic-go/quic-go/connection.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/connection.go rename to vendor/github.com/quic-go/quic-go/connection.go index 0df45e78..50db2964 100644 --- a/vendor/github.com/lucas-clemente/quic-go/connection.go +++ b/vendor/github.com/quic-go/quic-go/connection.go @@ -13,19 +13,19 @@ import ( "sync/atomic" "time" - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/logutils" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/logutils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" ) type unpacker interface { - UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) + UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error) UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error) } @@ -218,6 +218,9 @@ type connection struct { datagramQueue 
*datagramQueue + connStateMutex sync.Mutex + connState ConnectionState + logID string tracer logging.ConnectionTracer logger utils.Logger @@ -241,7 +244,6 @@ var newConnection = func( conf *Config, tlsConf *tls.Config, tokenGenerator *handshake.TokenGenerator, - enable0RTT bool, clientAddressValidated bool, tracer logging.ConnectionTracer, tracingID uint64, @@ -282,7 +284,6 @@ var newConnection = func( runner.ReplaceWithClosed, s.queueControlFrame, s.config.ConnectionIDGenerator, - s.version, ) s.preSetup() s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID)) @@ -294,7 +295,6 @@ var newConnection = func( s.perspective, s.tracer, s.logger, - s.version, ) initialStream := newCryptoStream() handshakeStream := newCryptoStream() @@ -323,6 +323,10 @@ var newConnection = func( if s.tracer != nil { s.tracer.SentTransportParameters(params) } + var allow0RTT func() bool + if conf.Allow0RTT != nil { + allow0RTT = func() bool { return conf.Allow0RTT(conn.RemoteAddr()) } + } cs := handshake.NewCryptoSetupServer( initialStream, handshakeStream, @@ -340,29 +344,15 @@ var newConnection = func( }, }, tlsConf, - enable0RTT, + allow0RTT, s.rttStats, tracer, logger, s.version, ) s.cryptoStreamHandler = cs - s.packer = newPacketPacker( - srcConnID, - s.connIDManager.Get, - initialStream, - handshakeStream, - s.sentPacketHandler, - s.retransmissionQueue, - s.RemoteAddr(), - cs, - s.framer, - s.receivedPacketHandler, - s.datagramQueue, - s.perspective, - s.version, - ) - s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version) + s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, initialStream, handshakeStream, s.sentPacketHandler, s.retransmissionQueue, s.RemoteAddr(), cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, s.oneRTTStream) return s } @@ -413,7 +403,6 
@@ var newClientConnection = func( runner.ReplaceWithClosed, s.queueControlFrame, s.config.ConnectionIDGenerator, - s.version, ) s.preSetup() s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID)) @@ -425,7 +414,6 @@ var newClientConnection = func( s.perspective, s.tracer, s.logger, - s.version, ) initialStream := newCryptoStream() handshakeStream := newCryptoStream() @@ -474,22 +462,8 @@ var newClientConnection = func( s.clientHelloWritten = clientHelloWritten s.cryptoStreamHandler = cs s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, newCryptoStream()) - s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version) - s.packer = newPacketPacker( - srcConnID, - s.connIDManager.Get, - initialStream, - handshakeStream, - s.sentPacketHandler, - s.retransmissionQueue, - s.RemoteAddr(), - cs, - s.framer, - s.receivedPacketHandler, - s.datagramQueue, - s.perspective, - s.version, - ) + s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen) + s.packer = newPacketPacker(srcConnID, s.connIDManager.Get, initialStream, handshakeStream, s.sentPacketHandler, s.retransmissionQueue, s.RemoteAddr(), cs, s.framer, s.receivedPacketHandler, s.datagramQueue, s.perspective) if len(tlsConf.ServerName) > 0 { s.tokenStoreKey = tlsConf.ServerName } else { @@ -505,8 +479,8 @@ var newClientConnection = func( func (s *connection) preSetup() { s.sendQueue = newSendQueue(s.conn) - s.retransmissionQueue = newRetransmissionQueue(s.version) - s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams, s.version) + s.retransmissionQueue = newRetransmissionQueue() + s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams) s.rttStats = &utils.RTTStats{} s.connFlowController = flowcontrol.NewConnectionFlowController( protocol.ByteCount(s.config.InitialConnectionReceiveWindow), @@ -528,9 +502,8 @@ func (s *connection) preSetup() { uint64(s.config.MaxIncomingStreams), uint64(s.config.MaxIncomingUniStreams), 
s.perspective, - s.version, ) - s.framer = newFramer(s.streamsMap, s.version) + s.framer = newFramer(s.streamsMap) s.receivedPackets = make(chan *receivedPacket, protocol.MaxConnUnprocessedPackets) s.closeChan = make(chan closeError, 1) s.sendingScheduled = make(chan struct{}, 1) @@ -542,6 +515,7 @@ func (s *connection) preSetup() { s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame) s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger) + s.connState.Version = s.version } // run the connection main loop @@ -735,11 +709,10 @@ func (s *connection) supportsDatagrams() bool { } func (s *connection) ConnectionState() ConnectionState { - return ConnectionState{ - TLS: s.cryptoStreamHandler.ConnectionState(), - SupportsDatagrams: s.supportsDatagrams(), - Version: s.version, - } + s.connStateMutex.Lock() + defer s.connStateMutex.Unlock() + s.connState.TLS = s.cryptoStreamHandler.ConnectionState() + return s.connState } // Time when the next keep-alive packet should be sent. 
@@ -875,7 +848,7 @@ func (s *connection) handlePacketImpl(rp *receivedPacket) bool { } if wire.IsLongHeaderPacket(p.data[0]) { - hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnIDLen) + hdr, packetData, rest, err := wire.ParsePacket(p.data) if err != nil { if s.tracer != nil { dropReason := logging.PacketDropHeaderParseError @@ -1008,7 +981,7 @@ func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header) return false } - packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data) + packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version) if err != nil { wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr)) return false @@ -1027,7 +1000,7 @@ func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header) return false } - if err := s.handleUnpackedPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil { + if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil { s.closeLocal(err) return false } @@ -1190,7 +1163,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) { }) } -func (s *connection) handleUnpackedPacket( +func (s *connection) handleUnpackedLongHeaderPacket( packet *unpackedPacket, ecn protocol.ECN, rcvTime time.Time, @@ -1209,7 +1182,7 @@ func (s *connection) handleUnpackedPacket( s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions) } // The server can change the source connection ID with the first Handshake packet. - if s.perspective == protocol.PerspectiveClient && packet.hdr.IsLongHeader && packet.hdr.SrcConnectionID != s.handshakeDestConnID { + if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID { cid := packet.hdr.SrcConnectionID s.logger.Debugf("Received first packet. 
Switching destination connection ID to: %s", cid) s.handshakeDestConnID = cid @@ -1282,7 +1255,7 @@ func (s *connection) handleFrames( // If we're not tracing, this slice will always remain empty. var frames []wire.Frame for len(data) > 0 { - l, frame, err := s.frameParser.ParseNext(data, encLevel) + l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version) if err != nil { return false, err } @@ -1675,6 +1648,9 @@ func (s *connection) restoreTransportParameters(params *wire.TransportParameters s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit) s.connFlowController.UpdateSendWindow(params.InitialMaxData) s.streamsMap.UpdateLimits(params) + s.connStateMutex.Lock() + s.connState.SupportsDatagrams = s.supportsDatagrams() + s.connStateMutex.Unlock() } func (s *connection) handleTransportParameters(params *wire.TransportParameters) { @@ -1683,6 +1659,7 @@ func (s *connection) handleTransportParameters(params *wire.TransportParameters) ErrorCode: qerr.TransportParameterError, ErrorMessage: err.Error(), }) + return } s.peerParams = params // On the client side we have to wait for handshake completion. @@ -1693,6 +1670,10 @@ func (s *connection) handleTransportParameters(params *wire.TransportParameters) // the client's transport parameters. 
close(s.earlyConnReadyChan) } + + s.connStateMutex.Lock() + s.connState.SupportsDatagrams = s.supportsDatagrams() + s.connStateMutex.Unlock() } func (s *connection) checkTransportParameters(params *wire.TransportParameters) error { @@ -1817,43 +1798,40 @@ func (s *connection) sendPackets() error { func (s *connection) maybeSendAckOnlyPacket() error { if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket(true) + packet, err := s.packer.PackCoalescedPacket(true, s.version) if err != nil { return err } if packet == nil { return nil } - s.logCoalescedPacket(packet) - for _, p := range packet.packets { - s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(time.Now(), s.retransmissionQueue)) - } - s.connIDManager.SentPacket() - s.sendQueue.Send(packet.buffer) + s.sendPackedCoalescedPacket(packet, time.Now()) return nil } - packet, err := s.packer.PackPacket(true) + now := time.Now() + p, buffer, err := s.packer.PackPacket(true, now, s.version) if err != nil { + if err == errNothingToPack { + return nil + } return err } - if packet == nil { - return nil - } - s.sendPackedPacket(packet, time.Now()) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false) + s.sendPackedShortHeaderPacket(buffer, p.Packet, now) return nil } func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error { // Queue probe packets until we actually send out a packet, // or until there are no more packets to queue. 
- var packet *packedPacket + var packet *coalescedPacket for { if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued { break } var err error - packet, err = s.packer.MaybePackProbePacket(encLevel) + packet, err = s.packer.MaybePackProbePacket(encLevel, s.version) if err != nil { return err } @@ -1874,15 +1852,15 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error { panic("unexpected encryption level") } var err error - packet, err = s.packer.MaybePackProbePacket(encLevel) + packet, err = s.packer.MaybePackProbePacket(encLevel, s.version) if err != nil { return err } } - if packet == nil || packet.packetContents == nil { + if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) { return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel) } - s.sendPackedPacket(packet, time.Now()) + s.sendPackedCoalescedPacket(packet, time.Now()) return nil } @@ -1894,44 +1872,59 @@ func (s *connection) sendPacket() (bool, error) { now := time.Now() if !s.handshakeConfirmed { - packet, err := s.packer.PackCoalescedPacket(false) + packet, err := s.packer.PackCoalescedPacket(false, s.version) if err != nil || packet == nil { return false, err } s.sentFirstPacket = true - s.logCoalescedPacket(packet) - for _, p := range packet.packets { - if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { - s.firstAckElicitingPacketAfterIdleSentTime = now - } - s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue)) - } - s.connIDManager.SentPacket() - s.sendQueue.Send(packet.buffer) + s.sendPackedCoalescedPacket(packet, now) return true, nil - } - if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) { - packet, err := s.packer.PackMTUProbePacket(s.mtuDiscoverer.GetPing()) + } else if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) { + ping, size := s.mtuDiscoverer.GetPing() + p, buffer, err := 
s.packer.PackMTUProbePacket(ping, size, now, s.version) if err != nil { return false, err } - s.sendPackedPacket(packet, now) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false) + s.sendPackedShortHeaderPacket(buffer, p.Packet, now) return true, nil } - packet, err := s.packer.PackPacket(false) - if err != nil || packet == nil { + p, buffer, err := s.packer.PackPacket(false, now, s.version) + if err != nil { + if err == errNothingToPack { + return false, nil + } return false, err } - s.sendPackedPacket(packet, now) + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false) + s.sendPackedShortHeaderPacket(buffer, p.Packet, now) return true, nil } -func (s *connection) sendPackedPacket(packet *packedPacket, now time.Time) { - if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && packet.IsAckEliciting() { +func (s *connection) sendPackedShortHeaderPacket(buffer *packetBuffer, p *ackhandler.Packet, now time.Time) { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && ackhandler.HasAckElicitingFrames(p.Frames) { s.firstAckElicitingPacketAfterIdleSentTime = now } - s.logPacket(packet) - s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket(now, s.retransmissionQueue)) + + s.sentPacketHandler.SentPacket(p) + s.connIDManager.SentPacket() + s.sendQueue.Send(buffer) +} + +func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, now time.Time) { + s.logCoalescedPacket(packet) + for _, p := range packet.longHdrPackets { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { + s.firstAckElicitingPacketAfterIdleSentTime = now + } + s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue)) + } + if p := packet.shortHdrPacket; p != nil { + if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() { + s.firstAckElicitingPacketAfterIdleSentTime = now + 
} + s.sentPacketHandler.SentPacket(p.Packet) + } s.connIDManager.SentPacket() s.sendQueue.Send(packet.buffer) } @@ -1942,14 +1935,14 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) { var transportErr *qerr.TransportError var applicationErr *qerr.ApplicationError if errors.As(e, &transportErr) { - packet, err = s.packer.PackConnectionClose(transportErr) + packet, err = s.packer.PackConnectionClose(transportErr, s.version) } else if errors.As(e, &applicationErr) { - packet, err = s.packer.PackApplicationClose(applicationErr) + packet, err = s.packer.PackApplicationClose(applicationErr, s.version) } else { packet, err = s.packer.PackConnectionClose(&qerr.TransportError{ ErrorCode: qerr.InternalError, ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()), - }) + }, s.version) } if err != nil { return nil, err @@ -1958,7 +1951,18 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) { return packet.buffer.Data, s.conn.Write(packet.buffer.Data) } -func (s *connection) logPacketContents(p *packetContents) { +func (s *connection) logLongHeaderPacket(p *longHeaderPacket) { + // quic-go logging + if s.logger.Debug() { + p.header.Log(s.logger) + if p.ack != nil { + wire.LogFrame(s.logger, p.ack, true) + } + for _, frame := range p.frames { + wire.LogFrame(s.logger, frame.Frame, true) + } + } + // tracing if s.tracer != nil { frames := make([]logging.Frame, 0, len(p.frames)) @@ -1969,40 +1973,87 @@ func (s *connection) logPacketContents(p *packetContents) { if p.ack != nil { ack = logutils.ConvertAckFrame(p.ack) } - s.tracer.SentPacket(p.header, p.length, ack, frames) + s.tracer.SentLongHeaderPacket(p.header, p.length, ack, frames) } +} - // quic-go logging - if !s.logger.Debug() { - return +func (s *connection) logShortHeaderPacket( + destConnID protocol.ConnectionID, + ackFrame *wire.AckFrame, + frames []*ackhandler.Frame, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp 
protocol.KeyPhaseBit, + size protocol.ByteCount, + isCoalesced bool, +) { + if s.logger.Debug() && !isCoalesced { + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT", pn, size, s.logID) } - p.header.Log(s.logger) - if p.ack != nil { - wire.LogFrame(s.logger, p.ack, true) + // quic-go logging + if s.logger.Debug() { + wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp) + if ackFrame != nil { + wire.LogFrame(s.logger, ackFrame, true) + } + for _, frame := range frames { + wire.LogFrame(s.logger, frame.Frame, true) + } } - for _, frame := range p.frames { - wire.LogFrame(s.logger, frame.Frame, true) + + // tracing + if s.tracer != nil { + fs := make([]logging.Frame, 0, len(frames)) + for _, f := range frames { + fs = append(fs, logutils.ConvertFrame(f.Frame)) + } + var ack *logging.AckFrame + if ackFrame != nil { + ack = logutils.ConvertAckFrame(ackFrame) + } + s.tracer.SentShortHeaderPacket( + &logging.ShortHeader{ + DestConnectionID: destConnID, + PacketNumber: pn, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, + size, + ack, + fs, + ) } } func (s *connection) logCoalescedPacket(packet *coalescedPacket) { if s.logger.Debug() { - if len(packet.packets) > 1 { - s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.packets), packet.buffer.Len(), s.logID) + // There's a short period between dropping both Initial and Handshake keys and completion of the handshake, + // during which we might call PackCoalescedPacket but just pack a short header packet. 
+ if len(packet.longHdrPackets) == 0 && packet.shortHdrPacket != nil { + s.logShortHeaderPacket( + packet.shortHdrPacket.DestConnID, + packet.shortHdrPacket.Ack, + packet.shortHdrPacket.Frames, + packet.shortHdrPacket.PacketNumber, + packet.shortHdrPacket.PacketNumberLen, + packet.shortHdrPacket.KeyPhase, + packet.shortHdrPacket.Length, + false, + ) + return + } + if len(packet.longHdrPackets) > 1 { + s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID) } else { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.packets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.packets[0].EncryptionLevel()) + s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel()) } } - for _, p := range packet.packets { - s.logPacketContents(p) + for _, p := range packet.longHdrPackets { + s.logLongHeaderPacket(p) } -} - -func (s *connection) logPacket(packet *packedPacket) { - if s.logger.Debug() { - s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.header.PacketNumber, packet.buffer.Len(), s.logID, packet.EncryptionLevel()) + if p := packet.shortHdrPacket; p != nil { + s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, p.Length, true) } - s.logPacketContents(packet.packetContents) } // AcceptStream returns the next stream openend by the peer diff --git a/vendor/github.com/lucas-clemente/quic-go/connection_timer.go b/vendor/github.com/quic-go/quic-go/connection_timer.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/connection_timer.go rename to vendor/github.com/quic-go/quic-go/connection_timer.go index 1c13cfb6..171fdd01 100644 --- a/vendor/github.com/lucas-clemente/quic-go/connection_timer.go +++ 
b/vendor/github.com/quic-go/quic-go/connection_timer.go @@ -3,7 +3,7 @@ package quic import ( "time" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/utils" ) var deadlineSendImmediately = time.Time{}.Add(42 * time.Millisecond) // any value > time.Time{} and before time.Now() is fine diff --git a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go b/vendor/github.com/quic-go/quic-go/crypto_stream.go similarity index 93% rename from vendor/github.com/lucas-clemente/quic-go/crypto_stream.go rename to vendor/github.com/quic-go/quic-go/crypto_stream.go index aa90b15a..f10e9120 100644 --- a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go +++ b/vendor/github.com/quic-go/quic-go/crypto_stream.go @@ -4,10 +4,10 @@ import ( "fmt" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type cryptoStream interface { diff --git a/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go similarity index 93% rename from vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go rename to vendor/github.com/quic-go/quic-go/crypto_stream_manager.go index 66f90049..91946acf 100644 --- a/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go +++ b/vendor/github.com/quic-go/quic-go/crypto_stream_manager.go @@ -3,8 +3,8 @@ package quic import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) type cryptoDataHandler interface { diff --git 
a/vendor/github.com/lucas-clemente/quic-go/datagram_queue.go b/vendor/github.com/quic-go/quic-go/datagram_queue.go similarity index 68% rename from vendor/github.com/lucas-clemente/quic-go/datagram_queue.go rename to vendor/github.com/quic-go/quic-go/datagram_queue.go index 7bedfe65..59c7d069 100644 --- a/vendor/github.com/lucas-clemente/quic-go/datagram_queue.go +++ b/vendor/github.com/quic-go/quic-go/datagram_queue.go @@ -1,15 +1,20 @@ package quic import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type datagramQueue struct { sendQueue chan *wire.DatagramFrame nextFrame *wire.DatagramFrame - rcvQueue chan []byte + + rcvMx sync.Mutex + rcvQueue [][]byte + rcvd chan struct{} // used to notify Receive that a new datagram was received closeErr error closed chan struct{} @@ -25,7 +30,7 @@ func newDatagramQueue(hasData func(), logger utils.Logger) *datagramQueue { return &datagramQueue{ hasData: hasData, sendQueue: make(chan *wire.DatagramFrame, 1), - rcvQueue: make(chan []byte, protocol.DatagramRcvQueueLen), + rcvd: make(chan struct{}, 1), dequeued: make(chan struct{}), closed: make(chan struct{}), logger: logger, @@ -76,20 +81,39 @@ func (h *datagramQueue) Pop() { func (h *datagramQueue) HandleDatagramFrame(f *wire.DatagramFrame) { data := make([]byte, len(f.Data)) copy(data, f.Data) - select { - case h.rcvQueue <- data: - default: + var queued bool + h.rcvMx.Lock() + if len(h.rcvQueue) < protocol.DatagramRcvQueueLen { + h.rcvQueue = append(h.rcvQueue, data) + queued = true + select { + case h.rcvd <- struct{}{}: + default: + } + } + h.rcvMx.Unlock() + if !queued && h.logger.Debug() { h.logger.Debugf("Discarding DATAGRAM frame (%d bytes payload)", len(f.Data)) } } // Receive gets a received DATAGRAM 
frame. func (h *datagramQueue) Receive() ([]byte, error) { - select { - case data := <-h.rcvQueue: - return data, nil - case <-h.closed: - return nil, h.closeErr + for { + h.rcvMx.Lock() + if len(h.rcvQueue) > 0 { + data := h.rcvQueue[0] + h.rcvQueue = h.rcvQueue[1:] + h.rcvMx.Unlock() + return data, nil + } + h.rcvMx.Unlock() + select { + case <-h.rcvd: + continue + case <-h.closed: + return nil, h.closeErr + } } } diff --git a/vendor/github.com/lucas-clemente/quic-go/errors.go b/vendor/github.com/quic-go/quic-go/errors.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/errors.go rename to vendor/github.com/quic-go/quic-go/errors.go index 0c9f0004..c9fb0a07 100644 --- a/vendor/github.com/lucas-clemente/quic-go/errors.go +++ b/vendor/github.com/quic-go/quic-go/errors.go @@ -3,7 +3,7 @@ package quic import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/qerr" ) type ( @@ -46,6 +46,7 @@ const ( type StreamError struct { StreamID StreamID ErrorCode StreamErrorCode + Remote bool } func (e *StreamError) Is(target error) bool { @@ -54,5 +55,9 @@ func (e *StreamError) Is(target error) bool { } func (e *StreamError) Error() string { - return fmt.Sprintf("stream %d canceled with error code %d", e.StreamID, e.ErrorCode) + pers := "local" + if e.Remote { + pers = "remote" + } + return fmt.Sprintf("stream %d canceled by %s with error code %d", e.StreamID, pers, e.ErrorCode) } diff --git a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go b/vendor/github.com/quic-go/quic-go/frame_sorter.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/frame_sorter.go rename to vendor/github.com/quic-go/quic-go/frame_sorter.go index 0573ade9..bee0abad 100644 --- a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go +++ b/vendor/github.com/quic-go/quic-go/frame_sorter.go @@ -2,9 +2,10 @@ package quic import ( "errors" + "sync" - "github.com/lucas-clemente/quic-go/internal/protocol" 
- list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist" + "github.com/quic-go/quic-go/internal/protocol" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" ) // byteInterval is an interval from one ByteCount to the other @@ -13,6 +14,12 @@ type byteInterval struct { End protocol.ByteCount } +var byteIntervalElementPool sync.Pool + +func init() { + byteIntervalElementPool = *list.NewPool[byteInterval]() +} + type frameSorterEntry struct { Data []byte DoneCb func() @@ -28,7 +35,7 @@ var errDuplicateStreamData = errors.New("duplicate stream data") func newFrameSorter() *frameSorter { s := frameSorter{ - gaps: list.New[byteInterval](), + gaps: list.NewWithPool[byteInterval](&byteIntervalElementPool), queue: make(map[protocol.ByteCount]frameSorterEntry), } s.gaps.PushFront(byteInterval{Start: 0, End: protocol.MaxByteCount}) diff --git a/vendor/github.com/lucas-clemente/quic-go/framer.go b/vendor/github.com/quic-go/quic-go/framer.go similarity index 77% rename from vendor/github.com/lucas-clemente/quic-go/framer.go rename to vendor/github.com/quic-go/quic-go/framer.go index 29d36b85..0b205916 100644 --- a/vendor/github.com/lucas-clemente/quic-go/framer.go +++ b/vendor/github.com/quic-go/quic-go/framer.go @@ -4,20 +4,20 @@ import ( "errors" "sync" - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/quicvarint" ) type framer interface { HasData() bool QueueControlFrame(wire.Frame) - AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) + AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) 
AddActiveStream(protocol.StreamID) - AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) + AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) Handle0RTTRejection() error } @@ -26,7 +26,6 @@ type framerI struct { mutex sync.Mutex streamGetter streamGetter - version protocol.VersionNumber activeStreams map[protocol.StreamID]struct{} streamQueue []protocol.StreamID @@ -37,14 +36,10 @@ type framerI struct { var _ framer = &framerI{} -func newFramer( - streamGetter streamGetter, - v protocol.VersionNumber, -) framer { +func newFramer(streamGetter streamGetter) framer { return &framerI{ streamGetter: streamGetter, activeStreams: make(map[protocol.StreamID]struct{}), - version: v, } } @@ -67,16 +62,18 @@ func (f *framerI) QueueControlFrame(frame wire.Frame) { f.controlFrameMutex.Unlock() } -func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framerI) AppendControlFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) { var length protocol.ByteCount f.controlFrameMutex.Lock() for len(f.controlFrames) > 0 { frame := f.controlFrames[len(f.controlFrames)-1] - frameLen := frame.Length(f.version) + frameLen := frame.Length(v) if length+frameLen > maxLen { break } - frames = append(frames, ackhandler.Frame{Frame: frame}) + af := ackhandler.GetFrame() + af.Frame = frame + frames = append(frames, af) length += frameLen f.controlFrames = f.controlFrames[:len(f.controlFrames)-1] } @@ -93,7 +90,7 @@ func (f *framerI) AddActiveStream(id protocol.StreamID) { f.mutex.Unlock() } -func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) { +func (f *framerI) AppendStreamFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, 
v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) { var length protocol.ByteCount var lastFrame *ackhandler.Frame f.mutex.Lock() @@ -118,7 +115,7 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol. // Therefore, we can pretend to have more bytes available when popping // the STREAM frame (which will always have the DataLen set). remainingLen += quicvarint.Len(uint64(remainingLen)) - frame, hasMoreData := str.popStreamFrame(remainingLen) + frame, hasMoreData := str.popStreamFrame(remainingLen, v) if hasMoreData { // put the stream back in the queue (at the end) f.streamQueue = append(f.streamQueue, id) } else { // no more data to send. Stream is not active any more @@ -130,16 +127,16 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol. if frame == nil { continue } - frames = append(frames, *frame) - length += frame.Length(f.version) + frames = append(frames, frame) + length += frame.Length(v) lastFrame = frame } f.mutex.Unlock() if lastFrame != nil { - lastFrameLen := lastFrame.Length(f.version) + lastFrameLen := lastFrame.Length(v) // account for the smaller size of the last STREAM frame lastFrame.Frame.(*wire.StreamFrame).DataLenPresent = false - length += lastFrame.Length(f.version) - lastFrameLen + length += lastFrame.Length(v) - lastFrameLen } return frames, length } diff --git a/vendor/github.com/quic-go/quic-go/http3/body.go b/vendor/github.com/quic-go/quic-go/http3/body.go new file mode 100644 index 00000000..15985a1c --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/body.go @@ -0,0 +1,135 @@ +package http3 + +import ( + "context" + "io" + "net" + + "github.com/quic-go/quic-go" +) + +// The HTTPStreamer allows taking over a HTTP/3 stream. 
The interface is implemented by: +// * for the server: the http.Request.Body +// * for the client: the http.Response.Body +// On the client side, the stream will be closed for writing, unless the DontCloseRequestStream RoundTripOpt was set. +// When a stream is taken over, it's the caller's responsibility to close the stream. +type HTTPStreamer interface { + HTTPStream() Stream +} + +type StreamCreator interface { + // Context returns a context that is cancelled when the underlying connection is closed. + Context() context.Context + OpenStream() (quic.Stream, error) + OpenStreamSync(context.Context) (quic.Stream, error) + OpenUniStream() (quic.SendStream, error) + OpenUniStreamSync(context.Context) (quic.SendStream, error) + LocalAddr() net.Addr + RemoteAddr() net.Addr + ConnectionState() quic.ConnectionState +} + +var _ StreamCreator = quic.Connection(nil) + +// A Hijacker allows hijacking of the stream creating part of a quic.Session from a http.Response.Body. +// It is used by WebTransport to create WebTransport streams after a session has been established. +type Hijacker interface { + StreamCreator() StreamCreator +} + +// The body of a http.Request or http.Response. 
+type body struct { + str quic.Stream + + wasHijacked bool // set when HTTPStream is called +} + +var ( + _ io.ReadCloser = &body{} + _ HTTPStreamer = &body{} +) + +func newRequestBody(str Stream) *body { + return &body{str: str} +} + +func (r *body) HTTPStream() Stream { + r.wasHijacked = true + return r.str +} + +func (r *body) wasStreamHijacked() bool { + return r.wasHijacked +} + +func (r *body) Read(b []byte) (int, error) { + return r.str.Read(b) +} + +func (r *body) Close() error { + r.str.CancelRead(quic.StreamErrorCode(errorRequestCanceled)) + return nil +} + +type hijackableBody struct { + body + conn quic.Connection // only needed to implement Hijacker + + // only set for the http.Response + // The channel is closed when the user is done with this response: + // either when Read() errors, or when Close() is called. + reqDone chan<- struct{} + reqDoneClosed bool +} + +var ( + _ Hijacker = &hijackableBody{} + _ HTTPStreamer = &hijackableBody{} +) + +func newResponseBody(str Stream, conn quic.Connection, done chan<- struct{}) *hijackableBody { + return &hijackableBody{ + body: body{ + str: str, + }, + reqDone: done, + conn: conn, + } +} + +func (r *hijackableBody) StreamCreator() StreamCreator { + return r.conn +} + +func (r *hijackableBody) Read(b []byte) (int, error) { + n, err := r.str.Read(b) + if err != nil { + r.requestDone() + } + return n, err +} + +func (r *hijackableBody) requestDone() { + if r.reqDoneClosed || r.reqDone == nil { + return + } + if r.reqDone != nil { + close(r.reqDone) + } + r.reqDoneClosed = true +} + +func (r *body) StreamID() quic.StreamID { + return r.str.StreamID() +} + +func (r *hijackableBody) Close() error { + r.requestDone() + // If the EOF was read, CancelRead() is a no-op. 
+ r.str.CancelRead(quic.StreamErrorCode(errorRequestCanceled)) + return nil +} + +func (r *hijackableBody) HTTPStream() Stream { + return r.str +} diff --git a/vendor/github.com/quic-go/quic-go/http3/capsule.go b/vendor/github.com/quic-go/quic-go/http3/capsule.go new file mode 100644 index 00000000..7bdcd4e5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/capsule.go @@ -0,0 +1,55 @@ +package http3 + +import ( + "io" + + "github.com/quic-go/quic-go/quicvarint" +) + +// CapsuleType is the type of the capsule. +type CapsuleType uint64 + +type exactReader struct { + R *io.LimitedReader +} + +func (r *exactReader) Read(b []byte) (int, error) { + n, err := r.R.Read(b) + if r.R.N > 0 { + return n, io.ErrUnexpectedEOF + } + return n, err +} + +// ParseCapsule parses the header of a Capsule. +// It returns an io.LimitedReader that can be used to read the Capsule value. +// The Capsule value must be read entirely (i.e. until the io.EOF) before using r again. +func ParseCapsule(r quicvarint.Reader) (CapsuleType, io.Reader, error) { + ct, err := quicvarint.Read(r) + if err != nil { + if err == io.EOF { + return 0, nil, io.ErrUnexpectedEOF + } + return 0, nil, err + } + l, err := quicvarint.Read(r) + if err != nil { + if err == io.EOF { + return 0, nil, io.ErrUnexpectedEOF + } + return 0, nil, err + } + return CapsuleType(ct), &exactReader{R: io.LimitReader(r, int64(l)).(*io.LimitedReader)}, nil +} + +// WriteCapsule writes a capsule +func WriteCapsule(w quicvarint.Writer, ct CapsuleType, value []byte) error { + b := make([]byte, 0, 16) + b = quicvarint.Append(b, uint64(ct)) + b = quicvarint.Append(b, uint64(len(value))) + if _, err := w.Write(b); err != nil { + return err + } + _, err := w.Write(value) + return err +} diff --git a/vendor/github.com/quic-go/quic-go/http3/client.go b/vendor/github.com/quic-go/quic-go/http3/client.go new file mode 100644 index 00000000..c63505e1 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/client.go @@ -0,0 +1,457 @@ 
+package http3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" + + "github.com/quic-go/qpack" +) + +// MethodGet0RTT allows a GET request to be sent using 0-RTT. +// Note that 0-RTT data doesn't provide replay protection. +const MethodGet0RTT = "GET_0RTT" + +const ( + defaultUserAgent = "quic-go HTTP/3" + defaultMaxResponseHeaderBytes = 10 * 1 << 20 // 10 MB +) + +var defaultQuicConfig = &quic.Config{ + MaxIncomingStreams: -1, // don't allow the server to create bidirectional streams + KeepAlivePeriod: 10 * time.Second, + Versions: []protocol.VersionNumber{protocol.VersionTLS}, +} + +type dialFunc func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) + +var dialAddr = quic.DialAddrEarlyContext + +type roundTripperOpts struct { + DisableCompression bool + EnableDatagram bool + MaxHeaderBytes int64 + AdditionalSettings map[uint64]uint64 + StreamHijacker func(FrameType, quic.Connection, quic.Stream, error) (hijacked bool, err error) + UniStreamHijacker func(StreamType, quic.Connection, quic.ReceiveStream, error) (hijacked bool) +} + +// client is a HTTP3 client doing requests +type client struct { + tlsConf *tls.Config + config *quic.Config + opts *roundTripperOpts + + dialOnce sync.Once + dialer dialFunc + handshakeErr error + + requestWriter *requestWriter + + decoder *qpack.Decoder + + hostname string + conn atomic.Pointer[quic.EarlyConnection] + + logger utils.Logger +} + +var _ roundTripCloser = &client{} + +func newClient(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) { + if conf == nil { + conf = defaultQuicConfig.Clone() + } else if 
len(conf.Versions) == 0 { + conf = conf.Clone() + conf.Versions = []quic.VersionNumber{defaultQuicConfig.Versions[0]} + } + if len(conf.Versions) != 1 { + return nil, errors.New("can only use a single QUIC version for dialing a HTTP/3 connection") + } + if conf.MaxIncomingStreams == 0 { + conf.MaxIncomingStreams = -1 // don't allow any bidirectional streams + } + conf.EnableDatagrams = opts.EnableDatagram + logger := utils.DefaultLogger.WithPrefix("h3 client") + + if tlsConf == nil { + tlsConf = &tls.Config{} + } else { + tlsConf = tlsConf.Clone() + } + // Replace existing ALPNs by H3 + tlsConf.NextProtos = []string{versionToALPN(conf.Versions[0])} + + return &client{ + hostname: authorityAddr("https", hostname), + tlsConf: tlsConf, + requestWriter: newRequestWriter(logger), + decoder: qpack.NewDecoder(func(hf qpack.HeaderField) {}), + config: conf, + opts: opts, + dialer: dialer, + logger: logger, + }, nil +} + +func (c *client) dial(ctx context.Context) error { + var err error + var conn quic.EarlyConnection + if c.dialer != nil { + conn, err = c.dialer(ctx, c.hostname, c.tlsConf, c.config) + } else { + conn, err = dialAddr(ctx, c.hostname, c.tlsConf, c.config) + } + if err != nil { + return err + } + c.conn.Store(&conn) + + // send the SETTINGs frame, using 0-RTT data, if possible + go func() { + if err := c.setupConn(conn); err != nil { + c.logger.Debugf("Setting up connection failed: %s", err) + conn.CloseWithError(quic.ApplicationErrorCode(errorInternalError), "") + } + }() + + if c.opts.StreamHijacker != nil { + go c.handleBidirectionalStreams(conn) + } + go c.handleUnidirectionalStreams(conn) + return nil +} + +func (c *client) setupConn(conn quic.EarlyConnection) error { + // open the control stream + str, err := conn.OpenUniStream() + if err != nil { + return err + } + b := make([]byte, 0, 64) + b = quicvarint.Append(b, streamTypeControlStream) + // send the SETTINGS frame + b = (&settingsFrame{Datagram: c.opts.EnableDatagram, Other: 
c.opts.AdditionalSettings}).Append(b) + _, err = str.Write(b) + return err +} + +func (c *client) handleBidirectionalStreams(conn quic.EarlyConnection) { + for { + str, err := conn.AcceptStream(context.Background()) + if err != nil { + c.logger.Debugf("accepting bidirectional stream failed: %s", err) + return + } + go func(str quic.Stream) { + _, err := parseNextFrame(str, func(ft FrameType, e error) (processed bool, err error) { + return c.opts.StreamHijacker(ft, conn, str, e) + }) + if err == errHijacked { + return + } + if err != nil { + c.logger.Debugf("error handling stream: %s", err) + } + conn.CloseWithError(quic.ApplicationErrorCode(errorFrameUnexpected), "received HTTP/3 frame on bidirectional stream") + }(str) + } +} + +func (c *client) handleUnidirectionalStreams(conn quic.EarlyConnection) { + for { + str, err := conn.AcceptUniStream(context.Background()) + if err != nil { + c.logger.Debugf("accepting unidirectional stream failed: %s", err) + return + } + + go func(str quic.ReceiveStream) { + streamType, err := quicvarint.Read(quicvarint.NewReader(str)) + if err != nil { + if c.opts.UniStreamHijacker != nil && c.opts.UniStreamHijacker(StreamType(streamType), conn, str, err) { + return + } + c.logger.Debugf("reading stream type on stream %d failed: %s", str.StreamID(), err) + return + } + // We're only interested in the control stream here. + switch streamType { + case streamTypeControlStream: + case streamTypeQPACKEncoderStream, streamTypeQPACKDecoderStream: + // Our QPACK implementation doesn't use the dynamic table yet. + // TODO: check that only one stream of each type is opened. + return + case streamTypePushStream: + // We never increased the Push ID, so we don't expect any push streams. 
+ conn.CloseWithError(quic.ApplicationErrorCode(errorIDError), "") + return + default: + if c.opts.UniStreamHijacker != nil && c.opts.UniStreamHijacker(StreamType(streamType), conn, str, nil) { + return + } + str.CancelRead(quic.StreamErrorCode(errorStreamCreationError)) + return + } + f, err := parseNextFrame(str, nil) + if err != nil { + conn.CloseWithError(quic.ApplicationErrorCode(errorFrameError), "") + return + } + sf, ok := f.(*settingsFrame) + if !ok { + conn.CloseWithError(quic.ApplicationErrorCode(errorMissingSettings), "") + return + } + if !sf.Datagram { + return + } + // If datagram support was enabled on our side as well as on the server side, + // we can expect it to have been negotiated both on the transport and on the HTTP/3 layer. + // Note: ConnectionState() will block until the handshake is complete (relevant when using 0-RTT). + if c.opts.EnableDatagram && !conn.ConnectionState().SupportsDatagrams { + conn.CloseWithError(quic.ApplicationErrorCode(errorSettingsError), "missing QUIC Datagram support") + } + }(str) + } +} + +func (c *client) Close() error { + conn := c.conn.Load() + if conn == nil { + return nil + } + return (*conn).CloseWithError(quic.ApplicationErrorCode(errorNoError), "") +} + +func (c *client) maxHeaderBytes() uint64 { + if c.opts.MaxHeaderBytes <= 0 { + return defaultMaxResponseHeaderBytes + } + return uint64(c.opts.MaxHeaderBytes) +} + +// RoundTripOpt executes a request and returns a response +func (c *client) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if authorityAddr("https", hostnameFromRequest(req)) != c.hostname { + return nil, fmt.Errorf("http3 client BUG: RoundTripOpt called for the wrong client (expected %s, got %s)", c.hostname, req.Host) + } + + c.dialOnce.Do(func() { + c.handshakeErr = c.dial(req.Context()) + }) + if c.handshakeErr != nil { + return nil, c.handshakeErr + } + + // At this point, c.conn is guaranteed to be set. 
+ conn := *c.conn.Load() + + // Immediately send out this request, if this is a 0-RTT request. + if req.Method == MethodGet0RTT { + req.Method = http.MethodGet + } else { + // wait for the handshake to complete + select { + case <-conn.HandshakeComplete().Done(): + case <-req.Context().Done(): + return nil, req.Context().Err() + } + } + + str, err := conn.OpenStreamSync(req.Context()) + if err != nil { + return nil, err + } + + // Request Cancellation: + // This go routine keeps running even after RoundTripOpt() returns. + // It is shut down when the application is done processing the body. + reqDone := make(chan struct{}) + done := make(chan struct{}) + go func() { + defer close(done) + select { + case <-req.Context().Done(): + str.CancelWrite(quic.StreamErrorCode(errorRequestCanceled)) + str.CancelRead(quic.StreamErrorCode(errorRequestCanceled)) + case <-reqDone: + } + }() + + doneChan := reqDone + if opt.DontCloseRequestStream { + doneChan = nil + } + rsp, rerr := c.doRequest(req, conn, str, opt, doneChan) + if rerr.err != nil { // if any error occurred + close(reqDone) + <-done + if rerr.streamErr != 0 { // if it was a stream error + str.CancelWrite(quic.StreamErrorCode(rerr.streamErr)) + } + if rerr.connErr != 0 { // if it was a connection error + var reason string + if rerr.err != nil { + reason = rerr.err.Error() + } + conn.CloseWithError(quic.ApplicationErrorCode(rerr.connErr), reason) + } + return nil, rerr.err + } + if opt.DontCloseRequestStream { + close(reqDone) + <-done + } + return rsp, rerr.err +} + +func (c *client) sendRequestBody(str Stream, body io.ReadCloser) error { + defer body.Close() + b := make([]byte, bodyCopyBufferSize) + for { + n, rerr := body.Read(b) + if n == 0 { + if rerr == nil { + continue + } + if rerr == io.EOF { + break + } + } + if _, err := str.Write(b[:n]); err != nil { + return err + } + if rerr != nil { + if rerr == io.EOF { + break + } + str.CancelWrite(quic.StreamErrorCode(errorRequestCanceled)) + return rerr + } + } + 
return nil +} + +func (c *client) doRequest(req *http.Request, conn quic.EarlyConnection, str quic.Stream, opt RoundTripOpt, reqDone chan<- struct{}) (*http.Response, requestError) { + var requestGzip bool + if !c.opts.DisableCompression && req.Method != "HEAD" && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" { + requestGzip = true + } + if err := c.requestWriter.WriteRequestHeader(str, req, requestGzip); err != nil { + return nil, newStreamError(errorInternalError, err) + } + + if req.Body == nil && !opt.DontCloseRequestStream { + str.Close() + } + + hstr := newStream(str, func() { conn.CloseWithError(quic.ApplicationErrorCode(errorFrameUnexpected), "") }) + if req.Body != nil { + // send the request body asynchronously + go func() { + if err := c.sendRequestBody(hstr, req.Body); err != nil { + c.logger.Errorf("Error writing request: %s", err) + } + if !opt.DontCloseRequestStream { + hstr.Close() + } + }() + } + + frame, err := parseNextFrame(str, nil) + if err != nil { + return nil, newStreamError(errorFrameError, err) + } + hf, ok := frame.(*headersFrame) + if !ok { + return nil, newConnError(errorFrameUnexpected, errors.New("expected first frame to be a HEADERS frame")) + } + if hf.Length > c.maxHeaderBytes() { + return nil, newStreamError(errorFrameError, fmt.Errorf("HEADERS frame too large: %d bytes (max: %d)", hf.Length, c.maxHeaderBytes())) + } + headerBlock := make([]byte, hf.Length) + if _, err := io.ReadFull(str, headerBlock); err != nil { + return nil, newStreamError(errorRequestIncomplete, err) + } + hfs, err := c.decoder.DecodeFull(headerBlock) + if err != nil { + // TODO: use the right error code + return nil, newConnError(errorGeneralProtocolError, err) + } + + connState := qtls.ToTLSConnectionState(conn.ConnectionState().TLS) + res := &http.Response{ + Proto: "HTTP/3.0", + ProtoMajor: 3, + Header: http.Header{}, + TLS: &connState, + Request: req, + } + for _, hf := range hfs { + switch hf.Name { + case ":status": + 
status, err := strconv.Atoi(hf.Value) + if err != nil { + return nil, newStreamError(errorGeneralProtocolError, errors.New("malformed non-numeric status pseudo header")) + } + res.StatusCode = status + res.Status = hf.Value + " " + http.StatusText(status) + default: + res.Header.Add(hf.Name, hf.Value) + } + } + respBody := newResponseBody(hstr, conn, reqDone) + + // Rules for when to set Content-Length are defined in https://tools.ietf.org/html/rfc7230#section-3.3.2. + _, hasTransferEncoding := res.Header["Transfer-Encoding"] + isInformational := res.StatusCode >= 100 && res.StatusCode < 200 + isNoContent := res.StatusCode == http.StatusNoContent + isSuccessfulConnect := req.Method == http.MethodConnect && res.StatusCode >= 200 && res.StatusCode < 300 + if !hasTransferEncoding && !isInformational && !isNoContent && !isSuccessfulConnect { + res.ContentLength = -1 + if clens, ok := res.Header["Content-Length"]; ok && len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } + } + } + + if requestGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = newGzipReader(respBody) + res.Uncompressed = true + } else { + res.Body = respBody + } + + return res, requestError{} +} + +func (c *client) HandshakeComplete() bool { + conn := c.conn.Load() + if conn == nil { + return false + } + select { + case <-(*conn).HandshakeComplete().Done(): + return true + default: + return false + } +} diff --git a/vendor/github.com/quic-go/quic-go/http3/error_codes.go b/vendor/github.com/quic-go/quic-go/http3/error_codes.go new file mode 100644 index 00000000..5df9b5df --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/error_codes.go @@ -0,0 +1,73 @@ +package http3 + +import ( + "fmt" + + "github.com/quic-go/quic-go" +) + +type errorCode quic.ApplicationErrorCode + +const ( + errorNoError errorCode = 0x100 + 
errorGeneralProtocolError errorCode = 0x101 + errorInternalError errorCode = 0x102 + errorStreamCreationError errorCode = 0x103 + errorClosedCriticalStream errorCode = 0x104 + errorFrameUnexpected errorCode = 0x105 + errorFrameError errorCode = 0x106 + errorExcessiveLoad errorCode = 0x107 + errorIDError errorCode = 0x108 + errorSettingsError errorCode = 0x109 + errorMissingSettings errorCode = 0x10a + errorRequestRejected errorCode = 0x10b + errorRequestCanceled errorCode = 0x10c + errorRequestIncomplete errorCode = 0x10d + errorMessageError errorCode = 0x10e + errorConnectError errorCode = 0x10f + errorVersionFallback errorCode = 0x110 + errorDatagramError errorCode = 0x4a1268 +) + +func (e errorCode) String() string { + switch e { + case errorNoError: + return "H3_NO_ERROR" + case errorGeneralProtocolError: + return "H3_GENERAL_PROTOCOL_ERROR" + case errorInternalError: + return "H3_INTERNAL_ERROR" + case errorStreamCreationError: + return "H3_STREAM_CREATION_ERROR" + case errorClosedCriticalStream: + return "H3_CLOSED_CRITICAL_STREAM" + case errorFrameUnexpected: + return "H3_FRAME_UNEXPECTED" + case errorFrameError: + return "H3_FRAME_ERROR" + case errorExcessiveLoad: + return "H3_EXCESSIVE_LOAD" + case errorIDError: + return "H3_ID_ERROR" + case errorSettingsError: + return "H3_SETTINGS_ERROR" + case errorMissingSettings: + return "H3_MISSING_SETTINGS" + case errorRequestRejected: + return "H3_REQUEST_REJECTED" + case errorRequestCanceled: + return "H3_REQUEST_CANCELLED" + case errorRequestIncomplete: + return "H3_INCOMPLETE_REQUEST" + case errorMessageError: + return "H3_MESSAGE_ERROR" + case errorConnectError: + return "H3_CONNECT_ERROR" + case errorVersionFallback: + return "H3_VERSION_FALLBACK" + case errorDatagramError: + return "H3_DATAGRAM_ERROR" + default: + return fmt.Sprintf("unknown error code: %#x", uint16(e)) + } +} diff --git a/vendor/github.com/quic-go/quic-go/http3/frames.go b/vendor/github.com/quic-go/quic-go/http3/frames.go new file mode 
100644 index 00000000..cdd97bc5 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/frames.go @@ -0,0 +1,164 @@ +package http3 + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" +) + +// FrameType is the frame type of a HTTP/3 frame +type FrameType uint64 + +type unknownFrameHandlerFunc func(FrameType, error) (processed bool, err error) + +type frame interface{} + +var errHijacked = errors.New("hijacked") + +func parseNextFrame(r io.Reader, unknownFrameHandler unknownFrameHandlerFunc) (frame, error) { + qr := quicvarint.NewReader(r) + for { + t, err := quicvarint.Read(qr) + if err != nil { + if unknownFrameHandler != nil { + hijacked, err := unknownFrameHandler(0, err) + if err != nil { + return nil, err + } + if hijacked { + return nil, errHijacked + } + } + return nil, err + } + // Call the unknownFrameHandler for frames not defined in the HTTP/3 spec + if t > 0xd && unknownFrameHandler != nil { + hijacked, err := unknownFrameHandler(FrameType(t), nil) + if err != nil { + return nil, err + } + if hijacked { + return nil, errHijacked + } + // If the unknownFrameHandler didn't process the frame, it is our responsibility to skip it. 
+ } + l, err := quicvarint.Read(qr) + if err != nil { + return nil, err + } + + switch t { + case 0x0: + return &dataFrame{Length: l}, nil + case 0x1: + return &headersFrame{Length: l}, nil + case 0x4: + return parseSettingsFrame(r, l) + case 0x3: // CANCEL_PUSH + case 0x5: // PUSH_PROMISE + case 0x7: // GOAWAY + case 0xd: // MAX_PUSH_ID + } + // skip over unknown frames + if _, err := io.CopyN(io.Discard, qr, int64(l)); err != nil { + return nil, err + } + } +} + +type dataFrame struct { + Length uint64 +} + +func (f *dataFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x0) + return quicvarint.Append(b, f.Length) +} + +type headersFrame struct { + Length uint64 +} + +func (f *headersFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x1) + return quicvarint.Append(b, f.Length) +} + +const settingDatagram = 0xffd277 + +type settingsFrame struct { + Datagram bool + Other map[uint64]uint64 // all settings that we don't explicitly recognize +} + +func parseSettingsFrame(r io.Reader, l uint64) (*settingsFrame, error) { + if l > 8*(1<<10) { + return nil, fmt.Errorf("unexpected size for SETTINGS frame: %d", l) + } + buf := make([]byte, l) + if _, err := io.ReadFull(r, buf); err != nil { + if err == io.ErrUnexpectedEOF { + return nil, io.EOF + } + return nil, err + } + frame := &settingsFrame{} + b := bytes.NewReader(buf) + var readDatagram bool + for b.Len() > 0 { + id, err := quicvarint.Read(b) + if err != nil { // should not happen. We allocated the whole frame already. + return nil, err + } + val, err := quicvarint.Read(b) + if err != nil { // should not happen. We allocated the whole frame already. 
+ return nil, err + } + + switch id { + case settingDatagram: + if readDatagram { + return nil, fmt.Errorf("duplicate setting: %d", id) + } + readDatagram = true + if val != 0 && val != 1 { + return nil, fmt.Errorf("invalid value for H3_DATAGRAM: %d", val) + } + frame.Datagram = val == 1 + default: + if _, ok := frame.Other[id]; ok { + return nil, fmt.Errorf("duplicate setting: %d", id) + } + if frame.Other == nil { + frame.Other = make(map[uint64]uint64) + } + frame.Other[id] = val + } + } + return frame, nil +} + +func (f *settingsFrame) Append(b []byte) []byte { + b = quicvarint.Append(b, 0x4) + var l protocol.ByteCount + for id, val := range f.Other { + l += quicvarint.Len(id) + quicvarint.Len(val) + } + if f.Datagram { + l += quicvarint.Len(settingDatagram) + quicvarint.Len(1) + } + b = quicvarint.Append(b, uint64(l)) + if f.Datagram { + b = quicvarint.Append(b, settingDatagram) + b = quicvarint.Append(b, 1) + } + for id, val := range f.Other { + b = quicvarint.Append(b, id) + b = quicvarint.Append(b, val) + } + return b +} diff --git a/vendor/github.com/quic-go/quic-go/http3/gzip_reader.go b/vendor/github.com/quic-go/quic-go/http3/gzip_reader.go new file mode 100644 index 00000000..01983ac7 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/gzip_reader.go @@ -0,0 +1,39 @@ +package http3 + +// copied from net/transport.go + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +import ( + "compress/gzip" + "io" +) + +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func newGzipReader(body io.ReadCloser) io.ReadCloser { + return &gzipReader{body: body} +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + 
gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} diff --git a/vendor/github.com/quic-go/quic-go/http3/http_stream.go b/vendor/github.com/quic-go/quic-go/http3/http_stream.go new file mode 100644 index 00000000..2799e2b3 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/http_stream.go @@ -0,0 +1,76 @@ +package http3 + +import ( + "fmt" + + "github.com/quic-go/quic-go" +) + +// A Stream is a HTTP/3 stream. +// When writing to and reading from the stream, data is framed in HTTP/3 DATA frames. +type Stream quic.Stream + +// The stream conforms to the quic.Stream interface, but instead of writing to and reading directly +// from the QUIC stream, it writes to and reads from the HTTP stream. +type stream struct { + quic.Stream + + buf []byte + + onFrameError func() + bytesRemainingInFrame uint64 +} + +var _ Stream = &stream{} + +func newStream(str quic.Stream, onFrameError func()) *stream { + return &stream{ + Stream: str, + onFrameError: onFrameError, + buf: make([]byte, 0, 16), + } +} + +func (s *stream) Read(b []byte) (int, error) { + if s.bytesRemainingInFrame == 0 { + parseLoop: + for { + frame, err := parseNextFrame(s.Stream, nil) + if err != nil { + return 0, err + } + switch f := frame.(type) { + case *headersFrame: + // skip HEADERS frames + continue + case *dataFrame: + s.bytesRemainingInFrame = f.Length + break parseLoop + default: + s.onFrameError() + // parseNextFrame skips over unknown frame types + // Therefore, this condition is only entered when we parsed another known frame type. 
+ return 0, fmt.Errorf("peer sent an unexpected frame: %T", f) + } + } + } + + var n int + var err error + if s.bytesRemainingInFrame < uint64(len(b)) { + n, err = s.Stream.Read(b[:s.bytesRemainingInFrame]) + } else { + n, err = s.Stream.Read(b) + } + s.bytesRemainingInFrame -= uint64(n) + return n, err +} + +func (s *stream) Write(b []byte) (int, error) { + s.buf = s.buf[:0] + s.buf = (&dataFrame{Length: uint64(len(b))}).Append(s.buf) + if _, err := s.Stream.Write(s.buf); err != nil { + return 0, err + } + return s.Stream.Write(b) +} diff --git a/vendor/github.com/quic-go/quic-go/http3/request.go b/vendor/github.com/quic-go/quic-go/http3/request.go new file mode 100644 index 00000000..9af25a57 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/request.go @@ -0,0 +1,111 @@ +package http3 + +import ( + "errors" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/quic-go/qpack" +) + +func requestFromHeaders(headers []qpack.HeaderField) (*http.Request, error) { + var path, authority, method, protocol, scheme, contentLengthStr string + + httpHeaders := http.Header{} + for _, h := range headers { + switch h.Name { + case ":path": + path = h.Value + case ":method": + method = h.Value + case ":authority": + authority = h.Value + case ":protocol": + protocol = h.Value + case ":scheme": + scheme = h.Value + case "content-length": + contentLengthStr = h.Value + default: + if !h.IsPseudo() { + httpHeaders.Add(h.Name, h.Value) + } + } + } + + // concatenate cookie headers, see https://tools.ietf.org/html/rfc6265#section-5.4 + if len(httpHeaders["Cookie"]) > 0 { + httpHeaders.Set("Cookie", strings.Join(httpHeaders["Cookie"], "; ")) + } + + isConnect := method == http.MethodConnect + // Extended CONNECT, see https://datatracker.ietf.org/doc/html/rfc8441#section-4 + isExtendedConnected := isConnect && protocol != "" + if isExtendedConnected { + if scheme == "" || path == "" || authority == "" { + return nil, errors.New("extended CONNECT: :scheme, :path and 
:authority must not be empty") + } + } else if isConnect { + if path != "" || authority == "" { // normal CONNECT + return nil, errors.New(":path must be empty and :authority must not be empty") + } + } else if len(path) == 0 || len(authority) == 0 || len(method) == 0 { + return nil, errors.New(":path, :authority and :method must not be empty") + } + + var u *url.URL + var requestURI string + var err error + + if isConnect { + u = &url.URL{} + if isExtendedConnected { + u, err = url.ParseRequestURI(path) + if err != nil { + return nil, err + } + } else { + u.Path = path + } + u.Scheme = scheme + u.Host = authority + requestURI = authority + } else { + protocol = "HTTP/3.0" + u, err = url.ParseRequestURI(path) + if err != nil { + return nil, err + } + requestURI = path + } + + var contentLength int64 + if len(contentLengthStr) > 0 { + contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64) + if err != nil { + return nil, err + } + } + + return &http.Request{ + Method: method, + URL: u, + Proto: protocol, + ProtoMajor: 3, + ProtoMinor: 0, + Header: httpHeaders, + Body: nil, + ContentLength: contentLength, + Host: authority, + RequestURI: requestURI, + }, nil +} + +func hostnameFromRequest(req *http.Request) string { + if req.URL != nil { + return req.URL.Host + } + return "" +} diff --git a/vendor/github.com/quic-go/quic-go/http3/request_writer.go b/vendor/github.com/quic-go/quic-go/http3/request_writer.go new file mode 100644 index 00000000..fcff6a1f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/request_writer.go @@ -0,0 +1,283 @@ +package http3 + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" + "golang.org/x/net/idna" + + "github.com/quic-go/qpack" + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/internal/utils" +) + +const bodyCopyBufferSize = 8 * 1024 + +type requestWriter struct { + mutex sync.Mutex + encoder 
*qpack.Encoder + headerBuf *bytes.Buffer + + logger utils.Logger +} + +func newRequestWriter(logger utils.Logger) *requestWriter { + headerBuf := &bytes.Buffer{} + encoder := qpack.NewEncoder(headerBuf) + return &requestWriter{ + encoder: encoder, + headerBuf: headerBuf, + logger: logger, + } +} + +func (w *requestWriter) WriteRequestHeader(str quic.Stream, req *http.Request, gzip bool) error { + // TODO: figure out how to add support for trailers + buf := &bytes.Buffer{} + if err := w.writeHeaders(buf, req, gzip); err != nil { + return err + } + _, err := str.Write(buf.Bytes()) + return err +} + +func (w *requestWriter) writeHeaders(wr io.Writer, req *http.Request, gzip bool) error { + w.mutex.Lock() + defer w.mutex.Unlock() + defer w.encoder.Close() + defer w.headerBuf.Reset() + + if err := w.encodeHeaders(req, gzip, "", actualContentLength(req)); err != nil { + return err + } + + b := make([]byte, 0, 128) + b = (&headersFrame{Length: uint64(w.headerBuf.Len())}).Append(b) + if _, err := wr.Write(b); err != nil { + return err + } + _, err := wr.Write(w.headerBuf.Bytes()) + return err +} + +// copied from net/transport.go +// Modified to support Extended CONNECT: +// Contrary to what the godoc for the http.Request says, +// we do respect the Proto field if the method is CONNECT. 
+func (w *requestWriter) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) error { + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return err + } + + // http.NewRequest sets this field to HTTP/1.1 + isExtendedConnect := req.Method == http.MethodConnect && req.Proto != "" && req.Proto != "HTTP/1.1" + + var path string + if req.Method != http.MethodConnect || isExtendedConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). 
+ f(":authority", host) + f(":method", req.Method) + if req.Method != http.MethodConnect || isExtendedConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if isExtendedConnect { + f(":protocol", req.Proto) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. 
+ hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + // TODO: check maximum header list size + // if hlSize > cc.peerMaxHeaderListSize { + // return errRequestHeaderListSize + // } + + // trace := httptrace.ContextClientTrace(req.Context()) + // traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + w.encoder.WriteField(qpack.HeaderField{Name: name, Value: value}) + // if traceHeaders { + // traceWroteHeaderField(trace, name, value) + // } + }) + + return nil +} + +// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) +// and returns a host:port. The port 443 is added if needed. +func authorityAddr(scheme string, authority string) (addr string) { + host, port, err := net.SplitHostPort(authority) + if err != nil { // authority didn't have a port + port = "443" + if scheme == "http" { + port = "80" + } + host = authority + } + if a, err := idna.ToASCII(host); err == nil { + host = a + } + // IPv6 address literal, without a port: + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { + return host + ":" + port + } + return net.JoinHostPort(host, port) +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// *) a non-empty string starting with '/' +// *) the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. 
+func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. +func actualContentLength(req *http.Request) int64 { + if req.Body == nil { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. 
+ switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} diff --git a/vendor/github.com/quic-go/quic-go/http3/response_writer.go b/vendor/github.com/quic-go/quic-go/http3/response_writer.go new file mode 100644 index 00000000..5cc32923 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/response_writer.go @@ -0,0 +1,137 @@ +package http3 + +import ( + "bufio" + "bytes" + "net/http" + "strconv" + "strings" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/internal/utils" + + "github.com/quic-go/qpack" +) + +type responseWriter struct { + conn quic.Connection + bufferedStr *bufio.Writer + buf []byte + + header http.Header + status int // status code passed to WriteHeader + headerWritten bool + + logger utils.Logger +} + +var ( + _ http.ResponseWriter = &responseWriter{} + _ http.Flusher = &responseWriter{} + _ Hijacker = &responseWriter{} +) + +func newResponseWriter(str quic.Stream, conn quic.Connection, logger utils.Logger) *responseWriter { + return &responseWriter{ + header: http.Header{}, + buf: make([]byte, 16), + conn: conn, + bufferedStr: bufio.NewWriter(str), + logger: logger, + } +} + +func (w *responseWriter) Header() http.Header { + return w.header +} + +func (w *responseWriter) WriteHeader(status int) { + if w.headerWritten { + return + } + + if status < 100 || status >= 200 { + w.headerWritten = true + } + w.status = status + + var headers bytes.Buffer + enc := qpack.NewEncoder(&headers) + enc.WriteField(qpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) + + for k, v := range w.header { + for index := range v { + enc.WriteField(qpack.HeaderField{Name: strings.ToLower(k), Value: v[index]}) + } + } + + w.buf = w.buf[:0] + w.buf = (&headersFrame{Length: uint64(headers.Len())}).Append(w.buf) + w.logger.Infof("Responding with %d", status) + if _, err := w.bufferedStr.Write(w.buf); err != nil { + w.logger.Errorf("could not write headers frame: %s", err.Error()) + } + if _, err := 
w.bufferedStr.Write(headers.Bytes()); err != nil { + w.logger.Errorf("could not write header frame payload: %s", err.Error()) + } + if !w.headerWritten { + w.Flush() + } +} + +func (w *responseWriter) Write(p []byte) (int, error) { + bodyAllowed := bodyAllowedForStatus(w.status) + if !w.headerWritten { + // If body is not allowed, we don't need to (and we can't) sniff the content type. + if bodyAllowed { + // If no content type, apply sniffing algorithm to body. + // We can't use `w.header.Get` here since if the Content-Type was set to nil, we shouldn't do sniffing. + _, haveType := w.header["Content-Type"] + + // If the Transfer-Encoding or Content-Encoding was set and is non-blank, + // we shouldn't sniff the body. + hasTE := w.header.Get("Transfer-Encoding") != "" + hasCE := w.header.Get("Content-Encoding") != "" + if !hasCE && !haveType && !hasTE && len(p) > 0 { + w.header.Set("Content-Type", http.DetectContentType(p)) + } + } + w.WriteHeader(http.StatusOK) + bodyAllowed = true + } + if !bodyAllowed { + return 0, http.ErrBodyNotAllowed + } + df := &dataFrame{Length: uint64(len(p))} + w.buf = w.buf[:0] + w.buf = df.Append(w.buf) + if _, err := w.bufferedStr.Write(w.buf); err != nil { + return 0, err + } + return w.bufferedStr.Write(p) +} + +func (w *responseWriter) Flush() { + if err := w.bufferedStr.Flush(); err != nil { + w.logger.Errorf("could not flush to stream: %s", err.Error()) + } +} + +func (w *responseWriter) StreamCreator() StreamCreator { + return w.conn +} + +// copied from http2/http2.go +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == http.StatusNoContent: + return false + case status == http.StatusNotModified: + return false + } + return true +} diff --git a/vendor/github.com/quic-go/quic-go/http3/roundtrip.go b/vendor/github.com/quic-go/quic-go/http3/roundtrip.go new file mode 100644 index 00000000..d9812abb --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/roundtrip.go @@ -0,0 +1,247 @@ +package http3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + + "github.com/quic-go/quic-go" +) + +type roundTripCloser interface { + RoundTripOpt(*http.Request, RoundTripOpt) (*http.Response, error) + HandshakeComplete() bool + io.Closer +} + +// RoundTripper implements the http.RoundTripper interface +type RoundTripper struct { + mutex sync.Mutex + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // QuicConfig is the quic.Config used for dialing new connections. + // If nil, reasonable default values will be used. + QuicConfig *quic.Config + + // Enable support for HTTP/3 datagrams. + // If set to true, QuicConfig.EnableDatagram will be set. + // See https://www.ietf.org/archive/id/draft-schinazi-masque-h3-datagram-02.html. + EnableDatagrams bool + + // Additional HTTP/3 settings. 
+ // It is invalid to specify any settings defined by the HTTP/3 draft and the datagram draft. + AdditionalSettings map[uint64]uint64 + + // When set, this callback is called for the first unknown frame parsed on a bidirectional stream. + // It is called right after parsing the frame type. + // If parsing the frame type fails, the error is passed to the callback. + // In that case, the frame type will not be set. + // Callers can either ignore the frame and return control of the stream back to HTTP/3 + // (by returning hijacked false). + // Alternatively, callers can take over the QUIC stream (by returning hijacked true). + StreamHijacker func(FrameType, quic.Connection, quic.Stream, error) (hijacked bool, err error) + + // When set, this callback is called for unknown unidirectional stream of unknown stream type. + // If parsing the stream type fails, the error is passed to the callback. + // In that case, the stream type will not be set. + UniStreamHijacker func(StreamType, quic.Connection, quic.ReceiveStream, error) (hijacked bool) + + // Dial specifies an optional dial function for creating QUIC + // connections for requests. + // If Dial is nil, quic.DialAddrEarlyContext will be used. + Dial func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) + + // MaxResponseHeaderBytes specifies a limit on how many response bytes are + // allowed in the server's response header. + // Zero means to use a default limit. + MaxResponseHeaderBytes int64 + + newClient func(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) // so we can mock it in tests + clients map[string]roundTripCloser +} + +// RoundTripOpt are options for the Transport.RoundTripOpt method. +type RoundTripOpt struct { + // OnlyCachedConn controls whether the RoundTripper may create a new QUIC connection. 
+ // If set true and no cached connection is available, RoundTripOpt will return ErrNoCachedConn. + OnlyCachedConn bool + // DontCloseRequestStream controls whether the request stream is closed after sending the request. + // If set, context cancellations have no effect after the response headers are received. + DontCloseRequestStream bool +} + +var ( + _ http.RoundTripper = &RoundTripper{} + _ io.Closer = &RoundTripper{} +) + +// ErrNoCachedConn is returned when RoundTripper.OnlyCachedConn is set +var ErrNoCachedConn = errors.New("http3: no cached connection was available") + +// RoundTripOpt is like RoundTrip, but takes options. +func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { + if req.URL == nil { + closeRequestBody(req) + return nil, errors.New("http3: nil Request.URL") + } + if req.URL.Host == "" { + closeRequestBody(req) + return nil, errors.New("http3: no Host in request URL") + } + if req.Header == nil { + closeRequestBody(req) + return nil, errors.New("http3: nil Request.Header") + } + if req.URL.Scheme != "https" { + closeRequestBody(req) + return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme) + } + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("http3: invalid http header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k) + } + } + } + + if req.Method != "" && !validMethod(req.Method) { + closeRequestBody(req) + return nil, fmt.Errorf("http3: invalid method %q", req.Method) + } + + hostname := authorityAddr("https", hostnameFromRequest(req)) + cl, isReused, err := r.getClient(hostname, opt.OnlyCachedConn) + if err != nil { + return nil, err + } + rsp, err := cl.RoundTripOpt(req, opt) + if err != nil { + r.removeClient(hostname) + if isReused { + if nerr, ok := err.(net.Error); ok && nerr.Timeout() { + 
return r.RoundTripOpt(req, opt) + } + } + } + return rsp, err +} + +// RoundTrip does a round trip. +func (r *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return r.RoundTripOpt(req, RoundTripOpt{}) +} + +func (r *RoundTripper) getClient(hostname string, onlyCached bool) (rtc roundTripCloser, isReused bool, err error) { + r.mutex.Lock() + defer r.mutex.Unlock() + + if r.clients == nil { + r.clients = make(map[string]roundTripCloser) + } + + client, ok := r.clients[hostname] + if !ok { + if onlyCached { + return nil, false, ErrNoCachedConn + } + var err error + newCl := newClient + if r.newClient != nil { + newCl = r.newClient + } + client, err = newCl( + hostname, + r.TLSClientConfig, + &roundTripperOpts{ + EnableDatagram: r.EnableDatagrams, + DisableCompression: r.DisableCompression, + MaxHeaderBytes: r.MaxResponseHeaderBytes, + StreamHijacker: r.StreamHijacker, + UniStreamHijacker: r.UniStreamHijacker, + }, + r.QuicConfig, + r.Dial, + ) + if err != nil { + return nil, false, err + } + r.clients[hostname] = client + } else if client.HandshakeComplete() { + isReused = true + } + return client, isReused, nil +} + +func (r *RoundTripper) removeClient(hostname string) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.clients == nil { + return + } + delete(r.clients, hostname) +} + +// Close closes the QUIC connections that this RoundTripper has used +func (r *RoundTripper) Close() error { + r.mutex.Lock() + defer r.mutex.Unlock() + for _, client := range r.clients { + if err := client.Close(); err != nil { + return err + } + } + r.clients = nil + return nil +} + +func closeRequestBody(req *http.Request) { + if req.Body != nil { + req.Body.Close() + } +} + +func validMethod(method string) bool { + /* + Method = "OPTIONS" ; Section 9.2 + | "GET" ; Section 9.3 + | "HEAD" ; Section 9.4 + | "POST" ; Section 9.5 + | "PUT" ; Section 9.6 + | "DELETE" ; Section 9.7 + | "TRACE" ; Section 9.8 + | "CONNECT" ; Section 9.9 + | extension-method + 
extension-method = token + token = 1* + */ + return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1 +} + +// copied from net/http/http.go +func isNotToken(r rune) bool { + return !httpguts.IsTokenRune(r) +} diff --git a/vendor/github.com/quic-go/quic-go/http3/server.go b/vendor/github.com/quic-go/quic-go/http3/server.go new file mode 100644 index 00000000..e546a930 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/http3/server.go @@ -0,0 +1,752 @@ +package http3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "runtime" + "strings" + "sync" + "time" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" + + "github.com/quic-go/qpack" +) + +// allows mocking of quic.Listen and quic.ListenAddr +var ( + quicListen = quic.ListenEarly + quicListenAddr = quic.ListenAddrEarly +) + +const ( + // NextProtoH3Draft29 is the ALPN protocol negotiated during the TLS handshake, for QUIC draft 29. + NextProtoH3Draft29 = "h3-29" + // NextProtoH3 is the ALPN protocol negotiated during the TLS handshake, for QUIC v1 and v2. + NextProtoH3 = "h3" +) + +// StreamType is the stream type of a unidirectional stream. +type StreamType uint64 + +const ( + streamTypeControlStream = 0 + streamTypePushStream = 1 + streamTypeQPACKEncoderStream = 2 + streamTypeQPACKDecoderStream = 3 +) + +func versionToALPN(v protocol.VersionNumber) string { + if v == protocol.Version1 || v == protocol.Version2 { + return NextProtoH3 + } + if v == protocol.VersionTLS || v == protocol.VersionDraft29 { + return NextProtoH3Draft29 + } + return "" +} + +// ConfigureTLSConfig creates a new tls.Config which can be used +// to create a quic.Listener meant for serving http3. 
The created +// tls.Config adds the functionality of detecting the used QUIC version +// in order to set the correct ALPN value for the http3 connection. +func ConfigureTLSConfig(tlsConf *tls.Config) *tls.Config { + // The tls.Config used to setup the quic.Listener needs to have the GetConfigForClient callback set. + // That way, we can get the QUIC version and set the correct ALPN value. + return &tls.Config{ + GetConfigForClient: func(ch *tls.ClientHelloInfo) (*tls.Config, error) { + // determine the ALPN from the QUIC version used + proto := NextProtoH3 + if qconn, ok := ch.Conn.(handshake.ConnWithVersion); ok { + proto = versionToALPN(qconn.GetQUICVersion()) + } + config := tlsConf + if tlsConf.GetConfigForClient != nil { + getConfigForClient := tlsConf.GetConfigForClient + var err error + conf, err := getConfigForClient(ch) + if err != nil { + return nil, err + } + if conf != nil { + config = conf + } + } + if config == nil { + return nil, nil + } + config = config.Clone() + config.NextProtos = []string{proto} + return config, nil + }, + } +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. +type contextKey struct { + name string +} + +func (k *contextKey) String() string { return "quic-go/http3 context value " + k.name } + +// ServerContextKey is a context key. It can be used in HTTP +// handlers with Context.Value to access the server that +// started the handler. The associated value will be of +// type *http3.Server. 
+var ServerContextKey = &contextKey{"http3-server"} + +type requestError struct { + err error + streamErr errorCode + connErr errorCode +} + +func newStreamError(code errorCode, err error) requestError { + return requestError{err: err, streamErr: code} +} + +func newConnError(code errorCode, err error) requestError { + return requestError{err: err, connErr: code} +} + +// listenerInfo contains info about specific listener added with addListener +type listenerInfo struct { + port int // 0 means that no info about port is available +} + +// Server is a HTTP/3 server. +type Server struct { + // Addr optionally specifies the UDP address for the server to listen on, + // in the form "host:port". + // + // When used by ListenAndServe and ListenAndServeTLS methods, if empty, + // ":https" (port 443) is used. See net.Dial for details of the address + // format. + // + // Otherwise, if Port is not set and underlying QUIC listeners do not + // have valid port numbers, the port part is used in Alt-Svc headers set + // with SetQuicHeaders. + Addr string + + // Port is used in Alt-Svc response headers set with SetQuicHeaders. If + // needed Port can be manually set when the Server is created. + // + // This is useful when a Layer 4 firewall is redirecting UDP traffic and + // clients must use a port different from the port the Server is + // listening on. + Port int + + // TLSConfig provides a TLS configuration for use by server. It must be + // set for ListenAndServe and Serve methods. + TLSConfig *tls.Config + + // QuicConfig provides the parameters for QUIC connection created with + // Serve. If nil, it uses reasonable default values. + // + // Configured versions are also used in Alt-Svc response header set with + // SetQuicHeaders. + QuicConfig *quic.Config + + // Handler is the HTTP request handler to use. If not set, defaults to + // http.NotFound. + Handler http.Handler + + // EnableDatagrams enables support for HTTP/3 datagrams. 
+ // If set to true, QuicConfig.EnableDatagram will be set. + // See https://datatracker.ietf.org/doc/html/draft-ietf-masque-h3-datagram-07. + EnableDatagrams bool + + // MaxHeaderBytes controls the maximum number of bytes the server will + // read parsing the request HEADERS frame. It does not limit the size of + // the request body. If zero or negative, http.DefaultMaxHeaderBytes is + // used. + MaxHeaderBytes int + + // AdditionalSettings specifies additional HTTP/3 settings. + // It is invalid to specify any settings defined by the HTTP/3 draft and the datagram draft. + AdditionalSettings map[uint64]uint64 + + // StreamHijacker, when set, is called for the first unknown frame parsed on a bidirectional stream. + // It is called right after parsing the frame type. + // If parsing the frame type fails, the error is passed to the callback. + // In that case, the frame type will not be set. + // Callers can either ignore the frame and return control of the stream back to HTTP/3 + // (by returning hijacked false). + // Alternatively, callers can take over the QUIC stream (by returning hijacked true). + StreamHijacker func(FrameType, quic.Connection, quic.Stream, error) (hijacked bool, err error) + + // UniStreamHijacker, when set, is called for unknown unidirectional stream of unknown stream type. + // If parsing the stream type fails, the error is passed to the callback. + // In that case, the stream type will not be set. + UniStreamHijacker func(StreamType, quic.Connection, quic.ReceiveStream, error) (hijacked bool) + + mutex sync.RWMutex + listeners map[*quic.EarlyListener]listenerInfo + + closed bool + + altSvcHeader string + + logger utils.Logger +} + +// ListenAndServe listens on the UDP address s.Addr and calls s.Handler to handle HTTP/3 requests on incoming connections. +// +// If s.Addr is blank, ":https" is used. 
+func (s *Server) ListenAndServe() error { + return s.serveConn(s.TLSConfig, nil) +} + +// ListenAndServeTLS listens on the UDP address s.Addr and calls s.Handler to handle HTTP/3 requests on incoming connections. +// +// If s.Addr is blank, ":https" is used. +func (s *Server) ListenAndServeTLS(certFile, keyFile string) error { + var err error + certs := make([]tls.Certificate, 1) + certs[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + // We currently only use the cert-related stuff from tls.Config, + // so we don't need to make a full copy. + config := &tls.Config{ + Certificates: certs, + } + return s.serveConn(config, nil) +} + +// Serve an existing UDP connection. +// It is possible to reuse the same connection for outgoing connections. +// Closing the server does not close the connection. +func (s *Server) Serve(conn net.PacketConn) error { + return s.serveConn(s.TLSConfig, conn) +} + +// ServeQUICConn serves a single QUIC connection. +func (s *Server) ServeQUICConn(conn quic.Connection) error { + s.mutex.Lock() + if s.logger == nil { + s.logger = utils.DefaultLogger.WithPrefix("server") + } + s.mutex.Unlock() + + return s.handleConn(conn) +} + +// ServeListener serves an existing QUIC listener. +// Make sure you use http3.ConfigureTLSConfig to configure a tls.Config +// and use it to construct a http3-friendly QUIC listener. +// Closing the server does close the listener. 
+func (s *Server) ServeListener(ln quic.EarlyListener) error { + if err := s.addListener(&ln); err != nil { + return err + } + err := s.serveListener(ln) + s.removeListener(&ln) + return err +} + +var errServerWithoutTLSConfig = errors.New("use of http3.Server without TLSConfig") + +func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error { + if tlsConf == nil { + return errServerWithoutTLSConfig + } + + s.mutex.Lock() + closed := s.closed + s.mutex.Unlock() + if closed { + return http.ErrServerClosed + } + + baseConf := ConfigureTLSConfig(tlsConf) + quicConf := s.QuicConfig + if quicConf == nil { + quicConf = &quic.Config{Allow0RTT: func(net.Addr) bool { return true }} + } else { + quicConf = s.QuicConfig.Clone() + } + if s.EnableDatagrams { + quicConf.EnableDatagrams = true + } + + var ln quic.EarlyListener + var err error + if conn == nil { + addr := s.Addr + if addr == "" { + addr = ":https" + } + ln, err = quicListenAddr(addr, baseConf, quicConf) + } else { + ln, err = quicListen(conn, baseConf, quicConf) + } + if err != nil { + return err + } + if err := s.addListener(&ln); err != nil { + return err + } + err = s.serveListener(ln) + s.removeListener(&ln) + return err +} + +func (s *Server) serveListener(ln quic.EarlyListener) error { + for { + conn, err := ln.Accept(context.Background()) + if err != nil { + return err + } + go func() { + if err := s.handleConn(conn); err != nil { + s.logger.Debugf(err.Error()) + } + }() + } +} + +func extractPort(addr string) (int, error) { + _, portStr, err := net.SplitHostPort(addr) + if err != nil { + return 0, err + } + + portInt, err := net.LookupPort("tcp", portStr) + if err != nil { + return 0, err + } + return portInt, nil +} + +func (s *Server) generateAltSvcHeader() { + if len(s.listeners) == 0 { + // Don't announce any ports since no one is listening for connections + s.altSvcHeader = "" + return + } + + // This code assumes that we will use protocol.SupportedVersions if no quic.Config is passed. 
+ supportedVersions := protocol.SupportedVersions + if s.QuicConfig != nil && len(s.QuicConfig.Versions) > 0 { + supportedVersions = s.QuicConfig.Versions + } + + // keep track of which have been seen so we don't yield duplicate values + seen := make(map[string]struct{}, len(supportedVersions)) + var versionStrings []string + for _, version := range supportedVersions { + if v := versionToALPN(version); len(v) > 0 { + if _, ok := seen[v]; !ok { + versionStrings = append(versionStrings, v) + seen[v] = struct{}{} + } + } + } + + var altSvc []string + addPort := func(port int) { + for _, v := range versionStrings { + altSvc = append(altSvc, fmt.Sprintf(`%s=":%d"; ma=2592000`, v, port)) + } + } + + if s.Port != 0 { + // if Port is specified, we must use it instead of the + // listener addresses since there's a reason it's specified. + addPort(s.Port) + } else { + // if we have some listeners assigned, try to find ports + // which we can announce, otherwise nothing should be announced + validPortsFound := false + for _, info := range s.listeners { + if info.port != 0 { + addPort(info.port) + validPortsFound = true + } + } + if !validPortsFound { + if port, err := extractPort(s.Addr); err == nil { + addPort(port) + } + } + } + + s.altSvcHeader = strings.Join(altSvc, ",") +} + +// We store a pointer to interface in the map set. This is safe because we only +// call trackListener via Serve and can track+defer untrack the same pointer to +// local variable there. We never need to compare a Listener from another caller. 
+func (s *Server) addListener(l *quic.EarlyListener) error { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.closed { + return http.ErrServerClosed + } + if s.logger == nil { + s.logger = utils.DefaultLogger.WithPrefix("server") + } + if s.listeners == nil { + s.listeners = make(map[*quic.EarlyListener]listenerInfo) + } + + if port, err := extractPort((*l).Addr().String()); err == nil { + s.listeners[l] = listenerInfo{port} + } else { + s.logger.Errorf( + "Unable to extract port from listener %+v, will not be announced using SetQuicHeaders: %s", err) + s.listeners[l] = listenerInfo{} + } + s.generateAltSvcHeader() + return nil +} + +func (s *Server) removeListener(l *quic.EarlyListener) { + s.mutex.Lock() + delete(s.listeners, l) + s.generateAltSvcHeader() + s.mutex.Unlock() +} + +func (s *Server) handleConn(conn quic.Connection) error { + decoder := qpack.NewDecoder(nil) + + // send a SETTINGS frame + str, err := conn.OpenUniStream() + if err != nil { + return fmt.Errorf("opening the control stream failed: %w", err) + } + b := make([]byte, 0, 64) + b = quicvarint.Append(b, streamTypeControlStream) // stream type + b = (&settingsFrame{Datagram: s.EnableDatagrams, Other: s.AdditionalSettings}).Append(b) + str.Write(b) + + go s.handleUnidirectionalStreams(conn) + + // Process all requests immediately. + // It's the client's responsibility to decide which requests are eligible for 0-RTT. 
+ for { + str, err := conn.AcceptStream(context.Background()) + if err != nil { + var appErr *quic.ApplicationError + if errors.As(err, &appErr) && appErr.ErrorCode == quic.ApplicationErrorCode(errorNoError) { + return nil + } + return fmt.Errorf("accepting stream failed: %w", err) + } + go func() { + rerr := s.handleRequest(conn, str, decoder, func() { + conn.CloseWithError(quic.ApplicationErrorCode(errorFrameUnexpected), "") + }) + if rerr.err == errHijacked { + return + } + if rerr.err != nil || rerr.streamErr != 0 || rerr.connErr != 0 { + s.logger.Debugf("Handling request failed: %s", err) + if rerr.streamErr != 0 { + str.CancelWrite(quic.StreamErrorCode(rerr.streamErr)) + } + if rerr.connErr != 0 { + var reason string + if rerr.err != nil { + reason = rerr.err.Error() + } + conn.CloseWithError(quic.ApplicationErrorCode(rerr.connErr), reason) + } + return + } + str.Close() + }() + } +} + +func (s *Server) handleUnidirectionalStreams(conn quic.Connection) { + for { + str, err := conn.AcceptUniStream(context.Background()) + if err != nil { + s.logger.Debugf("accepting unidirectional stream failed: %s", err) + return + } + + go func(str quic.ReceiveStream) { + streamType, err := quicvarint.Read(quicvarint.NewReader(str)) + if err != nil { + if s.UniStreamHijacker != nil && s.UniStreamHijacker(StreamType(streamType), conn, str, err) { + return + } + s.logger.Debugf("reading stream type on stream %d failed: %s", str.StreamID(), err) + return + } + // We're only interested in the control stream here. + switch streamType { + case streamTypeControlStream: + case streamTypeQPACKEncoderStream, streamTypeQPACKDecoderStream: + // Our QPACK implementation doesn't use the dynamic table yet. + // TODO: check that only one stream of each type is opened. 
+ return + case streamTypePushStream: // only the server can push + conn.CloseWithError(quic.ApplicationErrorCode(errorStreamCreationError), "") + return + default: + if s.UniStreamHijacker != nil && s.UniStreamHijacker(StreamType(streamType), conn, str, nil) { + return + } + str.CancelRead(quic.StreamErrorCode(errorStreamCreationError)) + return + } + f, err := parseNextFrame(str, nil) + if err != nil { + conn.CloseWithError(quic.ApplicationErrorCode(errorFrameError), "") + return + } + sf, ok := f.(*settingsFrame) + if !ok { + conn.CloseWithError(quic.ApplicationErrorCode(errorMissingSettings), "") + return + } + if !sf.Datagram { + return + } + // If datagram support was enabled on our side as well as on the client side, + // we can expect it to have been negotiated both on the transport and on the HTTP/3 layer. + // Note: ConnectionState() will block until the handshake is complete (relevant when using 0-RTT). + if s.EnableDatagrams && !conn.ConnectionState().SupportsDatagrams { + conn.CloseWithError(quic.ApplicationErrorCode(errorSettingsError), "missing QUIC Datagram support") + } + }(str) + } +} + +func (s *Server) maxHeaderBytes() uint64 { + if s.MaxHeaderBytes <= 0 { + return http.DefaultMaxHeaderBytes + } + return uint64(s.MaxHeaderBytes) +} + +func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *qpack.Decoder, onFrameError func()) requestError { + var ufh unknownFrameHandlerFunc + if s.StreamHijacker != nil { + ufh = func(ft FrameType, e error) (processed bool, err error) { return s.StreamHijacker(ft, conn, str, e) } + } + frame, err := parseNextFrame(str, ufh) + if err != nil { + if err == errHijacked { + return requestError{err: errHijacked} + } + return newStreamError(errorRequestIncomplete, err) + } + hf, ok := frame.(*headersFrame) + if !ok { + return newConnError(errorFrameUnexpected, errors.New("expected first frame to be a HEADERS frame")) + } + if hf.Length > s.maxHeaderBytes() { + return newStreamError(errorFrameError, 
fmt.Errorf("HEADERS frame too large: %d bytes (max: %d)", hf.Length, s.maxHeaderBytes())) + } + headerBlock := make([]byte, hf.Length) + if _, err := io.ReadFull(str, headerBlock); err != nil { + return newStreamError(errorRequestIncomplete, err) + } + hfs, err := decoder.DecodeFull(headerBlock) + if err != nil { + // TODO: use the right error code + return newConnError(errorGeneralProtocolError, err) + } + req, err := requestFromHeaders(hfs) + if err != nil { + // TODO: use the right error code + return newStreamError(errorGeneralProtocolError, err) + } + + connState := conn.ConnectionState().TLS.ConnectionState + req.TLS = &connState + req.RemoteAddr = conn.RemoteAddr().String() + body := newRequestBody(newStream(str, onFrameError)) + req.Body = body + + if s.logger.Debug() { + s.logger.Infof("%s %s%s, on stream %d", req.Method, req.Host, req.RequestURI, str.StreamID()) + } else { + s.logger.Infof("%s %s%s", req.Method, req.Host, req.RequestURI) + } + + ctx := str.Context() + ctx = context.WithValue(ctx, ServerContextKey, s) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, conn.LocalAddr()) + req = req.WithContext(ctx) + r := newResponseWriter(str, conn, s.logger) + defer r.Flush() + handler := s.Handler + if handler == nil { + handler = http.DefaultServeMux + } + + var panicked bool + func() { + defer func() { + if p := recover(); p != nil { + panicked = true + if p == http.ErrAbortHandler { + return + } + // Copied from net/http/server.go + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + s.logger.Errorf("http: panic serving: %v\n%s", p, buf) + } + }() + handler.ServeHTTP(r, req) + }() + + if body.wasStreamHijacked() { + return requestError{err: errHijacked} + } + + if panicked { + r.WriteHeader(http.StatusInternalServerError) + } else { + r.WriteHeader(http.StatusOK) + } + // If the EOF was read by the handler, CancelRead() is a no-op. 
+ str.CancelRead(quic.StreamErrorCode(errorNoError)) + return requestError{} +} + +// Close the server immediately, aborting requests and sending CONNECTION_CLOSE frames to connected clients. +// Close in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established. +func (s *Server) Close() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.closed = true + + var err error + for ln := range s.listeners { + if cerr := (*ln).Close(); cerr != nil && err == nil { + err = cerr + } + } + return err +} + +// CloseGracefully shuts down the server gracefully. The server sends a GOAWAY frame first, then waits for either timeout to trigger, or for all running requests to complete. +// CloseGracefully in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established. +func (s *Server) CloseGracefully(timeout time.Duration) error { + // TODO: implement + return nil +} + +// ErrNoAltSvcPort is the error returned by SetQuicHeaders when no port was found +// for Alt-Svc to announce. This can happen if listening on a PacketConn without a port +// (UNIX socket, for example) and no port is specified in Server.Port or Server.Addr. +var ErrNoAltSvcPort = errors.New("no port can be announced, specify it explicitly using Server.Port or Server.Addr") + +// SetQuicHeaders can be used to set the proper headers that announce that this server supports HTTP/3. +// The values set by default advertise all of the ports the server is listening on, but can be +// changed to a specific port by setting Server.Port before launching the server. +// If no listener's Addr().String() returns an address with a valid port, Server.Addr will be used +// to extract the port, if specified.
+// For example, a server launched using ListenAndServe on an address with port 443 would set: +// +// Alt-Svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000 +func (s *Server) SetQuicHeaders(hdr http.Header) error { + s.mutex.RLock() + defer s.mutex.RUnlock() + + if s.altSvcHeader == "" { + return ErrNoAltSvcPort + } + // use the map directly to avoid constant canonicalization + // since the key is already canonicalized + hdr["Alt-Svc"] = append(hdr["Alt-Svc"], s.altSvcHeader) + return nil +} + +// ListenAndServeQUIC listens on the UDP network address addr and calls the +// handler for HTTP/3 requests on incoming connections. http.DefaultServeMux is +// used when handler is nil. +func ListenAndServeQUIC(addr, certFile, keyFile string, handler http.Handler) error { + server := &Server{ + Addr: addr, + Handler: handler, + } + return server.ListenAndServeTLS(certFile, keyFile) +} + +// ListenAndServe listens on the given network address for both, TLS and QUIC +// connections in parallel. It returns if one of the two returns an error. +// http.DefaultServeMux is used when handler is nil. +// The correct Alt-Svc headers for QUIC are set. +func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error { + // Load certs + var err error + certs := make([]tls.Certificate, 1) + certs[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + // We currently only use the cert-related stuff from tls.Config, + // so we don't need to make a full copy. 
+ config := &tls.Config{ + Certificates: certs, + } + + if addr == "" { + addr = ":https" + } + + // Open the listeners + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return err + } + udpConn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return err + } + defer udpConn.Close() + + if handler == nil { + handler = http.DefaultServeMux + } + // Start the servers + quicServer := &Server{ + TLSConfig: config, + Handler: handler, + } + + hErr := make(chan error) + qErr := make(chan error) + go func() { + hErr <- http.ListenAndServeTLS(addr, certFile, keyFile, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + quicServer.SetQuicHeaders(w.Header()) + handler.ServeHTTP(w, r) + })) + }() + go func() { + qErr <- quicServer.Serve(udpConn) + }() + + select { + case err := <-hErr: + quicServer.Close() + return err + case err := <-qErr: + // Cannot close the HTTP server or wait for requests to complete properly :/ + return err + } +} diff --git a/vendor/github.com/lucas-clemente/quic-go/interface.go b/vendor/github.com/quic-go/quic-go/interface.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/interface.go rename to vendor/github.com/quic-go/quic-go/interface.go index 214afcf1..e55f258e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/interface.go +++ b/vendor/github.com/quic-go/quic-go/interface.go @@ -7,9 +7,9 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/logging" ) // The StreamID is the ID of a QUIC stream. @@ -176,7 +176,6 @@ type Connection interface { // Context returns a context that is cancelled when the connection is closed. Context() context.Context // ConnectionState returns basic details about the QUIC connection. 
- // It blocks until the handshake completes. // Warning: This API should not be considered stable and might change soon. ConnectionState() ConnectionState @@ -325,6 +324,11 @@ type Config struct { // This can be useful if version information is exchanged out-of-band. // It has no effect for a client. DisableVersionNegotiationPackets bool + // Allow0RTT allows the application to decide if a 0-RTT connection attempt should be accepted. + // When set, 0-RTT is enabled. When not set, 0-RTT is disabled. + // Only valid for the server. + // Warning: This API should not be considered stable and might change soon. + Allow0RTT func(net.Addr) bool // Enable QUIC datagram support (RFC 9221). EnableDatagrams bool Tracer logging.Tracer diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ack_eliciting.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go similarity index 80% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ack_eliciting.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go index b8cd558a..4bab4190 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ack_eliciting.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ack_eliciting.go @@ -1,6 +1,6 @@ package ackhandler -import "github.com/lucas-clemente/quic-go/internal/wire" +import "github.com/quic-go/quic-go/internal/wire" // IsFrameAckEliciting returns true if the frame is ack-eliciting. func IsFrameAckEliciting(f wire.Frame) bool { @@ -10,7 +10,7 @@ func IsFrameAckEliciting(f wire.Frame) bool { } // HasAckElicitingFrames returns true if at least one frame is ack-eliciting. 
-func HasAckElicitingFrames(fs []Frame) bool { +func HasAckElicitingFrames(fs []*Frame) bool { for _, f := range fs { if IsFrameAckEliciting(f.Frame) { return true diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go similarity index 73% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go index 2fc9ae4e..2c7cc4fc 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/ackhandler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/ackhandler.go @@ -1,9 +1,9 @@ package ackhandler import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" ) // NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler. 
@@ -17,8 +17,7 @@ func NewAckHandler( pers protocol.Perspective, tracer logging.ConnectionTracer, logger utils.Logger, - version protocol.VersionNumber, ) (SentPacketHandler, ReceivedPacketHandler) { sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, clientAddressValidated, pers, tracer, logger) - return sph, newReceivedPacketHandler(sph, rttStats, logger, version) + return sph, newReceivedPacketHandler(sph, rttStats, logger) } diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go new file mode 100644 index 00000000..deb23cfc --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/frame.go @@ -0,0 +1,29 @@ +package ackhandler + +import ( + "sync" + + "github.com/quic-go/quic-go/internal/wire" +) + +type Frame struct { + wire.Frame // nil if the frame has already been acknowledged in another packet + OnLost func(wire.Frame) + OnAcked func(wire.Frame) +} + +var framePool = sync.Pool{New: func() any { return &Frame{} }} + +func GetFrame() *Frame { + f := framePool.Get().(*Frame) + f.OnLost = nil + f.OnAcked = nil + return f +} + +func putFrame(f *Frame) { + f.Frame = nil + f.OnLost = nil + f.OnAcked = nil + framePool.Put(f) +} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go index 9079fb0b..5924f84b 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/interfaces.go @@ -3,8 +3,8 @@ package ackhandler import ( "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" + 
"github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) // SentPacketHandler handles ACKs received for outgoing packets diff --git a/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go new file mode 100644 index 00000000..366e5520 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/mockgen.go @@ -0,0 +1,3 @@ +package ackhandler + +//go:generate sh -c "../../mockgen_private.sh ackhandler mock_sent_packet_tracker_test.go github.com/quic-go/quic-go/internal/ackhandler sentPacketTracker" diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go similarity index 87% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go index b8a47b7a..394ee40a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet.go @@ -4,13 +4,13 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A Packet is a packet type Packet struct { PacketNumber protocol.PacketNumber - Frames []Frame + Frames []*Frame LargestAcked protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK Length protocol.ByteCount EncryptionLevel protocol.EncryptionLevel @@ -46,4 +46,10 @@ func GetPacket() *Packet { // We currently only return Packets back into the pool when they're acknowledged (not when they're lost). // This simplifies the code, and gives the vast majority of the performance benefit we can gain from using the pool. 
-func putPacket(p *Packet) { packetPool.Put(p) } +func putPacket(p *Packet) { + for _, f := range p.Frames { + putFrame(f) + } + p.Frames = nil + packetPool.Put(p) +} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go index b63083bf..9cf20a0b 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/packet_number_generator.go @@ -1,8 +1,8 @@ package ackhandler import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) type packetNumberGenerator interface { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go similarity index 90% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go index b233f573..3675694f 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_handler.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + 
"github.com/quic-go/quic-go/internal/wire" ) type receivedPacketHandler struct { @@ -25,13 +25,12 @@ func newReceivedPacketHandler( sentPackets sentPacketTracker, rttStats *utils.RTTStats, logger utils.Logger, - version protocol.VersionNumber, ) ReceivedPacketHandler { return &receivedPacketHandler{ sentPackets: sentPackets, - initialPackets: newReceivedPacketTracker(rttStats, logger, version), - handshakePackets: newReceivedPacketTracker(rttStats, logger, version), - appDataPackets: newReceivedPacketTracker(rttStats, logger, version), + initialPackets: newReceivedPacketTracker(rttStats, logger), + handshakePackets: newReceivedPacketTracker(rttStats, logger), + appDataPackets: newReceivedPacketTracker(rttStats, logger), lowest1RTTPacket: protocol.InvalidPacketNumber, } } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go index d4d28cc4..3143bfe1 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_history.go @@ -1,9 +1,11 @@ package ackhandler import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist" - "github.com/lucas-clemente/quic-go/internal/wire" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" + "github.com/quic-go/quic-go/internal/wire" ) // interval is an interval from one PacketNumber to the other @@ -12,6 +14,12 @@ type interval struct { End protocol.PacketNumber } +var intervalElementPool sync.Pool + +func init() { + intervalElementPool = 
*list.NewPool[interval]() +} + // The receivedPacketHistory stores if a packet number has already been received. // It generates ACK ranges which can be used to assemble an ACK frame. // It does not store packet contents. @@ -23,7 +31,7 @@ type receivedPacketHistory struct { func newReceivedPacketHistory() *receivedPacketHistory { return &receivedPacketHistory{ - ranges: list.New[interval](), + ranges: list.NewWithPool[interval](&intervalElementPool), } } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go index 0bd5ff88..7132ccaa 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/received_packet_tracker.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) // number of ack-eliciting packets received before sending an ack. 
@@ -31,21 +31,17 @@ type receivedPacketTracker struct { lastAck *wire.AckFrame logger utils.Logger - - version protocol.VersionNumber } func newReceivedPacketTracker( rttStats *utils.RTTStats, logger utils.Logger, - version protocol.VersionNumber, ) *receivedPacketTracker { return &receivedPacketTracker{ packetHistory: newReceivedPacketHistory(), maxAckDelay: protocol.MaxAckDelay, rttStats: rttStats, logger: logger, - version: version, } } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/send_mode.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go index 12ffb918..732bbc3a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_handler.go @@ -5,12 +5,12 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/congestion" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + 
"github.com/quic-go/quic-go/logging" ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go similarity index 87% rename from vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go rename to vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go index 7e569f95..06478399 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go +++ b/vendor/github.com/quic-go/quic-go/internal/ackhandler/sent_packet_history.go @@ -2,11 +2,12 @@ package ackhandler import ( "fmt" + "sync" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - list "github.com/lucas-clemente/quic-go/internal/utils/linkedlist" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" ) type sentPacketHistory struct { @@ -17,11 +18,17 @@ type sentPacketHistory struct { highestSent protocol.PacketNumber } +var packetElementPool sync.Pool + +func init() { + packetElementPool = *list.NewPool[*Packet]() +} + func newSentPacketHistory(rttStats *utils.RTTStats) *sentPacketHistory { return &sentPacketHistory{ rttStats: rttStats, - outstandingPacketList: list.New[*Packet](), - etcPacketList: list.New[*Packet](), + outstandingPacketList: list.NewWithPool[*Packet](&packetElementPool), + etcPacketList: list.NewWithPool[*Packet](&packetElementPool), packetMap: make(map[protocol.PacketNumber]*list.Element[*Packet]), highestSent: protocol.InvalidPacketNumber, } @@ -108,8 +115,7 @@ func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error { if !ok { return fmt.Errorf("packet %d not found in sent packet history", p) } - h.outstandingPacketList.Remove(el) - h.etcPacketList.Remove(el) + el.List().Remove(el) delete(h.packetMap, p) return nil 
} @@ -139,10 +145,7 @@ func (h *sentPacketHistory) DeclareLost(p *Packet) *Packet { if !ok { return nil } - // try to remove it from both lists, as we don't know which one it currently belongs to. - // Remove is a no-op for elements that are not in the list. - h.outstandingPacketList.Remove(el) - h.etcPacketList.Remove(el) + el.List().Remove(el) p.declaredLost = true // move it to the correct position in the etc list (based on the packet number) for el = h.etcPacketList.Back(); el != nil; el = el.Prev() { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go b/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go index 96b1c5aa..1d03abbb 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/bandwidth.go @@ -4,7 +4,7 @@ import ( "math" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // Bandwidth of a connection diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go b/vendor/github.com/quic-go/quic-go/internal/congestion/clock.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/clock.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go index a4155b83..a73cf82a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go +++ 
b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic.go @@ -4,8 +4,8 @@ import ( "math" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) // This cubic implementation is based on the one found in Chromiums's QUIC diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go index 1d17ce22..dac3118e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/cubic_sender.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go b/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go index 0088d7e8..b2f7c908 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/hybrid_slow_start.go @@ -3,8 +3,8 @@ package congestion import ( "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + 
"github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) // Note(pwestin): the magic clamping numbers come from the original code in diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go b/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/interface.go index 5157383f..5db3ebae 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/interface.go @@ -3,7 +3,7 @@ package congestion import ( "time" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A SendAlgorithm performs congestion control diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go b/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go rename to vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go index 6561a32c..a5861062 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/pacer.go +++ b/vendor/github.com/quic-go/quic-go/internal/congestion/pacer.go @@ -4,8 +4,8 @@ import ( "math" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) const maxBurstSizePackets = 10 diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go rename to 
vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go index a58af2d9..f3f24a60 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/base_flow_controller.go @@ -4,8 +4,8 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) type baseFlowController struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go index dbd3973e..13e69d6c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/connection_flow_controller.go @@ -5,9 +5,9 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" ) type connectionFlowController struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go index 1eeaee9f..946519d5 100644 --- 
a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/interface.go @@ -1,6 +1,6 @@ package flowcontrol -import "github.com/lucas-clemente/quic-go/internal/protocol" +import "github.com/quic-go/quic-go/internal/protocol" type flowController interface { // for sending diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go rename to vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go index d9bf649a..1770a9c8 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go +++ b/vendor/github.com/quic-go/quic-go/internal/flowcontrol/stream_flow_controller.go @@ -3,9 +3,9 @@ package flowcontrol import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" ) type streamFlowController struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/aead.go index 35e3dcf1..410745f1 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/aead.go @@ -4,9 +4,9 @@ import ( "crypto/cipher" "encoding/binary" - "github.com/lucas-clemente/quic-go/internal/protocol" - 
"github.com/lucas-clemente/quic-go/internal/qtls" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/utils" ) func createAEAD(suite *qtls.CipherSuiteTLS13, trafficSecret []byte, v protocol.VersionNumber) cipher.AEAD { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go similarity index 93% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go index f928d17d..ec14868c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/crypto_setup.go @@ -6,17 +6,18 @@ import ( "errors" "fmt" "io" + "math" "net" "sync" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/qtls" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" + "github.com/quic-go/quic-go/quicvarint" ) // TLS unexpected_message alert @@ -115,6 +116,7 @@ type cryptoSetup struct { clientHelloWritten bool clientHelloWrittenChan chan struct{} // is closed as soon as the ClientHello is written zeroRTTParametersChan chan<- *wire.TransportParameters + allow0RTT func() bool rttStats *utils.RTTStats @@ -195,7 +197,7 @@ func NewCryptoSetupServer( tp 
*wire.TransportParameters, runner handshakeRunner, tlsConf *tls.Config, - enable0RTT bool, + allow0RTT func() bool, rttStats *utils.RTTStats, tracer logging.ConnectionTracer, logger utils.Logger, @@ -208,13 +210,14 @@ func NewCryptoSetupServer( tp, runner, tlsConf, - enable0RTT, + allow0RTT != nil, rttStats, tracer, logger, protocol.PerspectiveServer, version, ) + cs.allow0RTT = allow0RTT cs.conn = qtls.Server(newConn(localAddr, remoteAddr, version), cs.tlsConf, cs.extraConf) return cs } @@ -260,14 +263,14 @@ func newCryptoSetup( alertChan: make(chan uint8), clientHelloWrittenChan: make(chan struct{}), zeroRTTParametersChan: zeroRTTParametersChan, - messageChan: make(chan []byte, 100), + messageChan: make(chan []byte, 1), isReadingHandshakeMessage: make(chan struct{}), closeChan: make(chan struct{}), version: version, } var maxEarlyData uint32 if enable0RTT { - maxEarlyData = 0xffffffff + maxEarlyData = math.MaxUint32 } cs.extraConf = &qtls.ExtraConfig{ GetExtensions: extHandler.GetExtensions, @@ -365,8 +368,15 @@ func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLev h.onError(alertUnexpectedMessage, err.Error()) return false } - h.messageChan <- data + if encLevel != protocol.Encryption1RTT { + select { + case h.messageChan <- data: + case <-h.handshakeDone: // handshake errored, nobody is going to consume this message + return false + } + } if encLevel == protocol.Encryption1RTT { + h.messageChan <- data h.handlePostHandshakeMessage() return false } @@ -490,13 +500,17 @@ func (h *cryptoSetup) accept0RTT(sessionTicketData []byte) bool { return false } valid := h.ourParams.ValidFor0RTT(t.Parameters) - if valid { - h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT) - h.rttStats.SetInitialRTT(t.RTT) - } else { + if !valid { h.logger.Debugf("Transport parameters changed. Rejecting 0-RTT.") + return false } - return valid + if !h.allow0RTT() { + h.logger.Debugf("0-RTT not allowed. 
Rejecting 0-RTT.") + return false + } + h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT) + h.rttStats.SetInitialRTT(t.RTT) + return true } // rejected0RTT is called for the client when the server rejects 0-RTT. @@ -575,7 +589,9 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph newHeaderProtector(suite, trafficSecret, true, h.version), ) h.mutex.Unlock() - h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } if h.tracer != nil { h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective.Opposite()) } @@ -588,12 +604,16 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph h.dropInitialKeys, h.perspective, ) - h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } case qtls.EncryptionApplication: h.readEncLevel = protocol.Encryption1RTT h.aead.SetReadKey(suite, trafficSecret) h.has1RTTOpener = true - h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID)) + } default: panic("unexpected read encryption level") } @@ -615,7 +635,9 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip newHeaderProtector(suite, trafficSecret, true, h.version), ) h.mutex.Unlock() - h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } if h.tracer != nil { h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective) } @@ -628,12 +650,16 @@ func (h *cryptoSetup) 
SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip h.dropInitialKeys, h.perspective, ) - h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } case qtls.EncryptionApplication: h.writeEncLevel = protocol.Encryption1RTT h.aead.SetWriteKey(suite, trafficSecret) h.has1RTTSealer = true - h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + if h.logger.Debug() { + h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID)) + } if h.zeroRTTSealer != nil { h.zeroRTTSealer = nil h.logger.Debugf("Dropping 0-RTT keys.") diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/header_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/header_protector.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go index 1f800c50..274fb30c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/header_protector.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/header_protector.go @@ -9,8 +9,8 @@ import ( "golang.org/x/crypto/chacha20" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qtls" ) type headerProtector interface { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/hkdf.go b/vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/hkdf.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/hkdf.go diff --git 
a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go index 6128147c..3967fdb8 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/initial_aead.go @@ -6,14 +6,14 @@ import ( "golang.org/x/crypto/hkdf" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qtls" ) var ( quicSaltOld = []byte{0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, 0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99} quicSaltV1 = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a} - quicSaltV2 = []byte{0xa7, 0x07, 0xc2, 0x03, 0xa5, 0x9b, 0x47, 0x18, 0x4a, 0x1d, 0x62, 0xca, 0x57, 0x04, 0x06, 0xea, 0x7a, 0xe3, 0xe5, 0xd3} + quicSaltV2 = []byte{0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9} ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/interface.go index 112f6c25..e7baea90 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/interface.go @@ -6,9 +6,9 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - 
"github.com/lucas-clemente/quic-go/internal/qtls" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/wire" ) var ( diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go b/vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go new file mode 100644 index 00000000..f91e7e8a --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/mockgen.go @@ -0,0 +1,3 @@ +package handshake + +//go:generate sh -c "../../mockgen_private.sh handshake mock_handshake_runner_test.go github.com/quic-go/quic-go/internal/handshake handshakeRunner" diff --git a/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go new file mode 100644 index 00000000..ff14f7e0 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/retry.go @@ -0,0 +1,70 @@ +package handshake + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "fmt" + "sync" + + "github.com/quic-go/quic-go/internal/protocol" +) + +var ( + retryAEADdraft29 cipher.AEAD // used for QUIC draft versions up to 34 + retryAEADv1 cipher.AEAD // used for QUIC v1 (RFC 9000) + retryAEADv2 cipher.AEAD // used for QUIC v2 +) + +func init() { + retryAEADdraft29 = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1}) + retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e}) + retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92}) +} + +func initAEAD(key [16]byte) cipher.AEAD { + aes, err := aes.NewCipher(key[:]) + if err != nil { + panic(err) + } + aead, err := cipher.NewGCM(aes) + if err != nil { + panic(err) + } + return aead +} + +var ( + retryBuf bytes.Buffer + retryMutex 
sync.Mutex + retryNonceDraft29 = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c} + retryNonceV1 = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb} + retryNonceV2 = [12]byte{0xd8, 0x69, 0x69, 0xbc, 0x2d, 0x7c, 0x6d, 0x99, 0x90, 0xef, 0xb0, 0x4a} +) + +// GetRetryIntegrityTag calculates the integrity tag on a Retry packet +func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte { + retryMutex.Lock() + defer retryMutex.Unlock() + + retryBuf.WriteByte(uint8(origDestConnID.Len())) + retryBuf.Write(origDestConnID.Bytes()) + retryBuf.Write(retry) + defer retryBuf.Reset() + + var tag [16]byte + var sealed []byte + //nolint:exhaustive // These are all the versions we support + switch version { + case protocol.Version1: + sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes()) + case protocol.Version2: + sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes()) + default: + sealed = retryAEADdraft29.Seal(tag[:0], retryNonceDraft29[:], nil, retryBuf.Bytes()) + } + if len(sealed) != 16 { + panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed))) + } + return &tag +} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/session_ticket.go b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/session_ticket.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go index 58b57c5a..56bcbcd5 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/session_ticket.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/session_ticket.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/wire" + 
"github.com/quic-go/quic-go/quicvarint" ) const sessionTicketRevision = 2 diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_extension_handler.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/tls_extension_handler.go index 3a679034..6105fe40 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/tls_extension_handler.go @@ -1,8 +1,8 @@ package handshake import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qtls" ) const ( @@ -24,7 +24,7 @@ var _ tlsExtensionHandler = &extensionHandler{} // newExtensionHandler creates a new extension handler func newExtensionHandler(params []byte, pers protocol.Perspective, v protocol.VersionNumber) tlsExtensionHandler { et := uint16(quicTLSExtensionType) - if v != protocol.Version1 { + if v == protocol.VersionDraft29 { et = quicTLSExtensionTypeOldDrafts } return &extensionHandler{ diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go index cda49466..e5e90bb3 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_generator.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/token_generator.go @@ -8,7 +8,7 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" + 
"github.com/quic-go/quic-go/internal/protocol" ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_protector.go b/vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/token_protector.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/token_protector.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go rename to vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go index 4093a206..89a9dcd6 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/updatable_aead.go +++ b/vendor/github.com/quic-go/quic-go/internal/handshake/updatable_aead.go @@ -8,11 +8,11 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/qtls" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" ) // KeyUpdateInterval is the maximum number of packets we send or receive before initiating a key update. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/logutils/frame.go b/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/internal/logutils/frame.go rename to vendor/github.com/quic-go/quic-go/internal/logutils/frame.go index c894be21..a6032fc2 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/logutils/frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/logutils/frame.go @@ -1,9 +1,9 @@ package logutils import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" ) // ConvertFrame converts a wire.Frame into a logging.Frame. diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go b/vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/connection_id.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go b/vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/encryption_level.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/key_phase.go b/vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/key_phase.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/key_phase.go diff --git 
a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go b/vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/packet_number.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go b/vendor/github.com/quic-go/quic-go/internal/protocol/params.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/params.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go b/vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/perspective.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go b/vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/protocol.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream.go b/vendor/github.com/quic-go/quic-go/internal/protocol/stream.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/stream.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go rename to vendor/github.com/quic-go/quic-go/internal/protocol/version.go index dd54dbd3..2ae7a115 
100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go +++ b/vendor/github.com/quic-go/quic-go/internal/protocol/version.go @@ -23,7 +23,7 @@ const ( VersionUnknown VersionNumber = math.MaxUint32 VersionDraft29 VersionNumber = 0xff00001d Version1 VersionNumber = 0x1 - Version2 VersionNumber = 0x709a50c4 + Version2 VersionNumber = 0x6b3343cf ) // SupportedVersions lists the versions that the server supports diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go b/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go rename to vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go index f56f91a2..cc846df6 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go +++ b/vendor/github.com/quic-go/quic-go/internal/qerr/error_codes.go @@ -3,7 +3,7 @@ package qerr import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/qtls" + "github.com/quic-go/quic-go/internal/qtls" ) // TransportErrorCode is a QUIC transport error. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/errors.go b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/qerr/errors.go rename to vendor/github.com/quic-go/quic-go/internal/qerr/errors.go index 3f0208d6..26ea3445 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/errors.go +++ b/vendor/github.com/quic-go/quic-go/internal/qerr/errors.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) var ( diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go119.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go rename to vendor/github.com/quic-go/quic-go/internal/qtls/go119.go index 9794b872..6c804cce 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go119.go +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go119.go @@ -1,4 +1,4 @@ -//go:build go1.19 +//go:build go1.19 && !go1.20 package qtls @@ -9,7 +9,7 @@ import ( "net" "unsafe" - "github.com/marten-seemann/qtls-go1-19" + "github.com/quic-go/qtls-go1-19" ) type ( @@ -83,7 +83,7 @@ type cipherSuiteTLS13 struct { Hash crypto.Hash } -//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-19.cipherSuiteTLS13ByID +//go:linkname cipherSuiteTLS13ByID github.com/quic-go/qtls-go1-19.cipherSuiteTLS13ByID func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 // CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go120.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go rename to vendor/github.com/quic-go/quic-go/internal/qtls/go120.go index e02de380..b9baa52f 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go118.go +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go120.go @@ -1,4 +1,4 @@ -//go:build go1.18 && !go1.19 +//go:build go1.20 package qtls @@ -9,7 +9,7 @@ import ( "net" "unsafe" - "github.com/marten-seemann/qtls-go1-18" + "github.com/quic-go/qtls-go1-20" ) type ( @@ -17,7 +17,7 @@ type ( Alert = qtls.Alert // A Certificate is qtls.Certificate. Certificate = qtls.Certificate - // CertificateRequestInfo contains inforamtion about a certificate request. + // CertificateRequestInfo contains information about a certificate request. CertificateRequestInfo = qtls.CertificateRequestInfo // A CipherSuiteTLS13 is a cipher suite for TLS 1.3 CipherSuiteTLS13 = qtls.CipherSuiteTLS13 @@ -83,7 +83,7 @@ type cipherSuiteTLS13 struct { Hash crypto.Hash } -//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-18.cipherSuiteTLS13ByID +//go:linkname cipherSuiteTLS13ByID github.com/quic-go/qtls-go1-20.cipherSuiteTLS13ByID func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 // CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite. diff --git a/vendor/github.com/quic-go/quic-go/internal/qtls/go121.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go121.go new file mode 100644 index 00000000..b3340639 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go121.go @@ -0,0 +1,5 @@ +//go:build go1.21 + +package qtls + +var _ int = "The version of quic-go you're using can't be built on Go 1.21 yet. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions." 
diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go b/vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go similarity index 62% rename from vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go rename to vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go index 83418e9e..e15f0362 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qtls/go_oldversion.go +++ b/vendor/github.com/quic-go/quic-go/internal/qtls/go_oldversion.go @@ -1,5 +1,5 @@ -//go:build !go1.18 +//go:build !go1.19 package qtls -var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions." +var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions." diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/buffered_write_closer.go b/vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/buffered_write_closer.go rename to vendor/github.com/quic-go/quic-go/internal/utils/buffered_write_closer.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go rename to vendor/github.com/quic-go/quic-go/internal/utils/byteorder.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go b/vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go rename to 
vendor/github.com/quic-go/quic-go/internal/utils/byteorder_big_endian.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/ip.go b/vendor/github.com/quic-go/quic-go/internal/utils/ip.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/ip.go rename to vendor/github.com/quic-go/quic-go/internal/utils/ip.go diff --git a/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md new file mode 100644 index 00000000..66482f4f --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/README.md @@ -0,0 +1,6 @@ +# Usage + +This is the Go standard library implementation of a linked list +(https://golang.org/src/container/list/list.go), with the following modifications: +* it uses Go generics +* it allows passing in a `sync.Pool` (via the `NewWithPool` constructor) to reduce allocations of `Element` structs diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/linkedlist/linkedlist.go b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/linkedlist/linkedlist.go rename to vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go index 217b21ef..804a3444 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/linkedlist/linkedlist.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/linkedlist/linkedlist.go @@ -11,6 +11,12 @@ // } package list +import "sync" + +func NewPool[T any]() *sync.Pool { + return &sync.Pool{New: func() any { return &Element[T]{} }} +} + // Element is an element of a linked list. type Element[T any] struct { // Next and previous pointers in the doubly-linked list of elements. 
@@ -43,11 +49,17 @@ func (e *Element[T]) Prev() *Element[T] { return nil } +func (e *Element[T]) List() *List[T] { + return e.list +} + // List represents a doubly linked list. // The zero value for List is an empty list ready to use. type List[T any] struct { root Element[T] // sentinel list element, only &root, root.prev, and root.next are used len int // current list length excluding (this) sentinel element + + pool *sync.Pool } // Init initializes or clears list l. @@ -61,6 +73,12 @@ func (l *List[T]) Init() *List[T] { // New returns an initialized list. func New[T any]() *List[T] { return new(List[T]).Init() } +// NewWithPool returns an initialized list, using a sync.Pool for list elements. +func NewWithPool[T any](pool *sync.Pool) *List[T] { + l := &List[T]{pool: pool} + return l.Init() +} + // Len returns the number of elements of list l. // The complexity is O(1). func (l *List[T]) Len() int { return l.len } @@ -101,7 +119,14 @@ func (l *List[T]) insert(e, at *Element[T]) *Element[T] { // insertValue is a convenience wrapper for insert(&Element{Value: v}, at). func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] { - return l.insert(&Element[T]{Value: v}, at) + var e *Element[T] + if l.pool != nil { + e = l.pool.Get().(*Element[T]) + } else { + e = &Element[T]{} + } + e.Value = v + return l.insert(e, at) } // remove removes e from its list, decrements l.len @@ -111,6 +136,9 @@ func (l *List[T]) remove(e *Element[T]) { e.next = nil // avoid memory leaks e.prev = nil // avoid memory leaks e.list = nil + if l.pool != nil { + l.pool.Put(e) + } l.len-- } @@ -132,12 +160,13 @@ func (l *List[T]) move(e, at *Element[T]) { // It returns the element value e.Value. // The element must not be nil. 
func (l *List[T]) Remove(e *Element[T]) T { + v := e.Value if e.list == l { // if e.list == l, l must have been initialized when e was inserted // in l or l == nil (e is a zero Element) and l.remove will crash l.remove(e) } - return e.Value + return v } // PushFront inserts a new element e with value v at the front of list l and returns e. diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go b/vendor/github.com/quic-go/quic-go/internal/utils/log.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go rename to vendor/github.com/quic-go/quic-go/internal/utils/log.go index e27f01b4..89b52c0d 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/log.go @@ -125,7 +125,7 @@ func readLoggingEnv() LogLevel { case "error": return LogLevelError default: - fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/lucas-clemente/quic-go/wiki/Logging") + fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/quic-go/quic-go/wiki/Logging") return LogLevelNothing } } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go b/vendor/github.com/quic-go/quic-go/internal/utils/minmax.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go rename to vendor/github.com/quic-go/quic-go/internal/utils/minmax.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rand.go b/vendor/github.com/quic-go/quic-go/internal/utils/rand.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/rand.go rename to vendor/github.com/quic-go/quic-go/internal/utils/rand.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go similarity index 98% rename from 
vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go rename to vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go index 1a43ea12..527539e1 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/rtt_stats.go +++ b/vendor/github.com/quic-go/quic-go/internal/utils/rtt_stats.go @@ -3,7 +3,7 @@ package utils import ( "time" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go b/vendor/github.com/quic-go/quic-go/internal/utils/timer.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go rename to vendor/github.com/quic-go/quic-go/internal/utils/timer.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go index 1ea8a234..5b01649a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame.go @@ -6,9 +6,9 @@ import ( "sort" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" ) var errInvalidAckRanges = errors.New("AckFrame: ACK frame contains invalid ACK ranges") diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame_pool.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_frame_pool.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame_pool.go rename to 
vendor/github.com/quic-go/quic-go/internal/wire/ack_frame_pool.go diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go b/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go similarity index 82% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go rename to vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go index 0f418580..03a1235e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ack_range.go @@ -1,6 +1,6 @@ package wire -import "github.com/lucas-clemente/quic-go/internal/protocol" +import "github.com/quic-go/quic-go/internal/protocol" // AckRange is an ACK range type AckRange struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go index 4014a941..de2283b3 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/connection_close_frame.go @@ -4,8 +4,8 @@ import ( "bytes" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A ConnectionCloseFrame is a CONNECTION_CLOSE frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go index 62a591ca..99ffb21d 100644 --- 
a/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/crypto_frame.go @@ -4,8 +4,8 @@ import ( "bytes" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A CryptoFrame is a CRYPTO frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go similarity index 88% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go index e553632b..b567af8a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/data_blocked_frame.go @@ -3,8 +3,8 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A DataBlockedFrame is a DATA_BLOCKED frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/datagram_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/datagram_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go index 04f0ce1b..756a23ff 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/datagram_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/datagram_frame.go @@ -4,8 +4,8 @@ import ( "bytes" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" 
) // A DatagramFrame is a DATAGRAM frame diff --git a/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go new file mode 100644 index 00000000..d10820d6 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/internal/wire/extended_header.go @@ -0,0 +1,210 @@ +package wire + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" +) + +// ErrInvalidReservedBits is returned when the reserved bits are incorrect. +// When this error is returned, parsing continues, and an ExtendedHeader is returned. +// This is necessary because we need to decrypt the packet in that case, +// in order to avoid a timing side-channel. +var ErrInvalidReservedBits = errors.New("invalid reserved bits") + +// ExtendedHeader is the header of a QUIC packet. +type ExtendedHeader struct { + Header + + typeByte byte + + KeyPhase protocol.KeyPhaseBit + + PacketNumberLen protocol.PacketNumberLen + PacketNumber protocol.PacketNumber + + parsedLen protocol.ByteCount +} + +func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool /* reserved bits valid */, error) { + startLen := b.Len() + // read the (now unencrypted) first byte + var err error + h.typeByte, err = b.ReadByte() + if err != nil { + return false, err + } + if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil { + return false, err + } + reservedBitsValid, err := h.parseLongHeader(b, v) + if err != nil { + return false, err + } + h.parsedLen = protocol.ByteCount(startLen - b.Len()) + return reservedBitsValid, err +} + +func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) { + if err := h.readPacketNumber(b); err != nil { + return false, err + } + if h.typeByte&0xc != 0 { + return false, nil + } + 
return true, nil +} + +func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error { + h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1 + switch h.PacketNumberLen { + case protocol.PacketNumberLen1: + n, err := b.ReadByte() + if err != nil { + return err + } + h.PacketNumber = protocol.PacketNumber(n) + case protocol.PacketNumberLen2: + n, err := utils.BigEndian.ReadUint16(b) + if err != nil { + return err + } + h.PacketNumber = protocol.PacketNumber(n) + case protocol.PacketNumberLen3: + n, err := utils.BigEndian.ReadUint24(b) + if err != nil { + return err + } + h.PacketNumber = protocol.PacketNumber(n) + case protocol.PacketNumberLen4: + n, err := utils.BigEndian.ReadUint32(b) + if err != nil { + return err + } + h.PacketNumber = protocol.PacketNumber(n) + default: + return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen) + } + return nil +} + +// Append appends the Header. +func (h *ExtendedHeader) Append(b []byte, v protocol.VersionNumber) ([]byte, error) { + if h.DestConnectionID.Len() > protocol.MaxConnIDLen { + return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len()) + } + if h.SrcConnectionID.Len() > protocol.MaxConnIDLen { + return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len()) + } + + var packetType uint8 + if v == protocol.Version2 { + //nolint:exhaustive + switch h.Type { + case protocol.PacketTypeInitial: + packetType = 0b01 + case protocol.PacketType0RTT: + packetType = 0b10 + case protocol.PacketTypeHandshake: + packetType = 0b11 + case protocol.PacketTypeRetry: + packetType = 0b00 + } + } else { + //nolint:exhaustive + switch h.Type { + case protocol.PacketTypeInitial: + packetType = 0b00 + case protocol.PacketType0RTT: + packetType = 0b01 + case protocol.PacketTypeHandshake: + packetType = 0b10 + case protocol.PacketTypeRetry: + packetType = 0b11 + } + } + firstByte := 0xc0 | packetType<<4 + if h.Type != protocol.PacketTypeRetry { + // Retry 
packets don't have a packet number + firstByte |= uint8(h.PacketNumberLen - 1) + } + + b = append(b, firstByte) + b = append(b, make([]byte, 4)...) + binary.BigEndian.PutUint32(b[len(b)-4:], uint32(h.Version)) + b = append(b, uint8(h.DestConnectionID.Len())) + b = append(b, h.DestConnectionID.Bytes()...) + b = append(b, uint8(h.SrcConnectionID.Len())) + b = append(b, h.SrcConnectionID.Bytes()...) + + //nolint:exhaustive + switch h.Type { + case protocol.PacketTypeRetry: + b = append(b, h.Token...) + return b, nil + case protocol.PacketTypeInitial: + b = quicvarint.Append(b, uint64(len(h.Token))) + b = append(b, h.Token...) + } + b = quicvarint.AppendWithLen(b, uint64(h.Length), 2) + return appendPacketNumber(b, h.PacketNumber, h.PacketNumberLen) +} + +// ParsedLen returns the number of bytes that were consumed when parsing the header +func (h *ExtendedHeader) ParsedLen() protocol.ByteCount { + return h.parsedLen +} + +// GetLength determines the length of the Header. +func (h *ExtendedHeader) GetLength(_ protocol.VersionNumber) protocol.ByteCount { + length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */ + if h.Type == protocol.PacketTypeInitial { + length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token)) + } + return length +} + +// Log logs the Header +func (h *ExtendedHeader) Log(logger utils.Logger) { + var token string + if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry { + if len(h.Token) == 0 { + token = "Token: (empty), " + } else { + token = fmt.Sprintf("Token: %#x, ", h.Token) + } + if h.Type == protocol.PacketTypeRetry { + logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version) + return + } + } + 
logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version) +} + +func appendPacketNumber(b []byte, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen) ([]byte, error) { + switch pnLen { + case protocol.PacketNumberLen1: + b = append(b, uint8(pn)) + case protocol.PacketNumberLen2: + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, uint16(pn)) + b = append(b, buf...) + case protocol.PacketNumberLen3: + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(pn)) + b = append(b, buf[1:]...) + case protocol.PacketNumberLen4: + buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(pn)) + b = append(b, buf...) + default: + return nil, fmt.Errorf("invalid packet number length: %d", pnLen) + } + return b, nil +} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go b/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go similarity index 63% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go rename to vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go index 70a117e8..ec744d90 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/frame_parser.go @@ -6,8 +6,8 @@ import ( "fmt" "reflect" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" ) type frameParser struct { @@ -16,31 +16,30 @@ type frameParser struct { ackDelayExponent uint8 supportsDatagrams bool - - version protocol.VersionNumber } +var _ FrameParser = &frameParser{} + // NewFrameParser creates a new frame parser. 
-func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParser { +func NewFrameParser(supportsDatagrams bool) *frameParser { return &frameParser{ r: *bytes.NewReader(nil), supportsDatagrams: supportsDatagrams, - version: v, } } // ParseNext parses the next frame. // It skips PADDING frames. -func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel) (int, Frame, error) { +func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (int, Frame, error) { startLen := len(data) p.r.Reset(data) - frame, err := p.parseNext(&p.r, encLevel) + frame, err := p.parseNext(&p.r, encLevel, v) n := startLen - p.r.Len() p.r.Reset(nil) return n, frame, err } -func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) { +func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) { for r.Len() != 0 { typeByte, _ := p.r.ReadByte() if typeByte == 0x0 { // PADDING frame @@ -48,7 +47,7 @@ func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLev } r.UnreadByte() - f, err := p.parseFrame(r, typeByte, encLevel) + f, err := p.parseFrame(r, typeByte, encLevel, v) if err != nil { return nil, &qerr.TransportError{ FrameType: uint64(typeByte), @@ -61,56 +60,56 @@ func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLev return nil, nil } -func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel) (Frame, error) { +func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) { var frame Frame var err error if typeByte&0xf8 == 0x8 { - frame, err = parseStreamFrame(r, p.version) + frame, err = parseStreamFrame(r, v) } else { switch typeByte { case 0x1: - frame, err = parsePingFrame(r, p.version) + frame, err = parsePingFrame(r, v) case 0x2, 
0x3: ackDelayExponent := p.ackDelayExponent if encLevel != protocol.Encryption1RTT { ackDelayExponent = protocol.DefaultAckDelayExponent } - frame, err = parseAckFrame(r, ackDelayExponent, p.version) + frame, err = parseAckFrame(r, ackDelayExponent, v) case 0x4: - frame, err = parseResetStreamFrame(r, p.version) + frame, err = parseResetStreamFrame(r, v) case 0x5: - frame, err = parseStopSendingFrame(r, p.version) + frame, err = parseStopSendingFrame(r, v) case 0x6: - frame, err = parseCryptoFrame(r, p.version) + frame, err = parseCryptoFrame(r, v) case 0x7: - frame, err = parseNewTokenFrame(r, p.version) + frame, err = parseNewTokenFrame(r, v) case 0x10: - frame, err = parseMaxDataFrame(r, p.version) + frame, err = parseMaxDataFrame(r, v) case 0x11: - frame, err = parseMaxStreamDataFrame(r, p.version) + frame, err = parseMaxStreamDataFrame(r, v) case 0x12, 0x13: - frame, err = parseMaxStreamsFrame(r, p.version) + frame, err = parseMaxStreamsFrame(r, v) case 0x14: - frame, err = parseDataBlockedFrame(r, p.version) + frame, err = parseDataBlockedFrame(r, v) case 0x15: - frame, err = parseStreamDataBlockedFrame(r, p.version) + frame, err = parseStreamDataBlockedFrame(r, v) case 0x16, 0x17: - frame, err = parseStreamsBlockedFrame(r, p.version) + frame, err = parseStreamsBlockedFrame(r, v) case 0x18: - frame, err = parseNewConnectionIDFrame(r, p.version) + frame, err = parseNewConnectionIDFrame(r, v) case 0x19: - frame, err = parseRetireConnectionIDFrame(r, p.version) + frame, err = parseRetireConnectionIDFrame(r, v) case 0x1a: - frame, err = parsePathChallengeFrame(r, p.version) + frame, err = parsePathChallengeFrame(r, v) case 0x1b: - frame, err = parsePathResponseFrame(r, p.version) + frame, err = parsePathResponseFrame(r, v) case 0x1c, 0x1d: - frame, err = parseConnectionCloseFrame(r, p.version) + frame, err = parseConnectionCloseFrame(r, v) case 0x1e: - frame, err = parseHandshakeDoneFrame(r, p.version) + frame, err = parseHandshakeDoneFrame(r, v) case 0x30, 0x31: 
if p.supportsDatagrams { - frame, err = parseDatagramFrame(r, p.version) + frame, err = parseDatagramFrame(r, v) break } fallthrough diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go index b9947044..7bbc0e88 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/handshake_done_frame.go @@ -3,7 +3,7 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A HandshakeDoneFrame is a HANDSHAKE_DONE frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go b/vendor/github.com/quic-go/quic-go/internal/wire/header.go similarity index 82% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go rename to vendor/github.com/quic-go/quic-go/internal/wire/header.go index 4e7da480..4d3c5049 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/header.go @@ -7,9 +7,9 @@ import ( "fmt" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" ) // ParseConnectionID parses the destination connection ID of a packet. 
@@ -121,9 +121,8 @@ var ErrUnsupportedVersion = errors.New("unsupported version") // The Header is the version independent part of the header type Header struct { - IsLongHeader bool - typeByte byte - Type protocol.PacketType + typeByte byte + Type protocol.PacketType Version protocol.VersionNumber SrcConnectionID protocol.ConnectionID @@ -140,24 +139,22 @@ type Header struct { // If the packet has a long header, the packet is cut according to the length field. // If we understand the version, the packet is header up unto the packet number. // Otherwise, only the invariant part of the header is parsed. -func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* packet data */, []byte /* rest */, error) { - hdr, err := parseHeader(bytes.NewReader(data), shortHeaderConnIDLen) +func ParsePacket(data []byte) (*Header, []byte, []byte, error) { + if len(data) == 0 || !IsLongHeaderPacket(data[0]) { + return nil, nil, nil, errors.New("not a long header packet") + } + hdr, err := parseHeader(bytes.NewReader(data)) if err != nil { if err == ErrUnsupportedVersion { return hdr, nil, nil, ErrUnsupportedVersion } return nil, nil, nil, err } - var rest []byte - if hdr.IsLongHeader { - if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length { - return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length) - } - packetLen := int(hdr.ParsedLen() + hdr.Length) - rest = data[packetLen:] - data = data[:packetLen] + if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length { + return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length) } - return hdr, data, rest, nil + packetLen := int(hdr.ParsedLen() + hdr.Length) + return hdr, data[:packetLen], data[packetLen:], nil } // ParseHeader parses the header. 
@@ -165,43 +162,17 @@ func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* pack // For long header packets: // * if we understand the version: up to the packet number // * if not, only the invariant part of the header -func parseHeader(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) { +func parseHeader(b *bytes.Reader) (*Header, error) { startLen := b.Len() - h, err := parseHeaderImpl(b, shortHeaderConnIDLen) - if err != nil { - return h, err - } - h.parsedLen = protocol.ByteCount(startLen - b.Len()) - return h, err -} - -func parseHeaderImpl(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) { typeByte, err := b.ReadByte() if err != nil { return nil, err } - h := &Header{ - typeByte: typeByte, - IsLongHeader: IsLongHeaderPacket(typeByte), - } - - if !h.IsLongHeader { - if h.typeByte&0x40 == 0 { - return nil, errors.New("not a QUIC packet") - } - if err := h.parseShortHeader(b, shortHeaderConnIDLen); err != nil { - return nil, err - } - return h, nil - } - return h, h.parseLongHeader(b) -} - -func (h *Header) parseShortHeader(b *bytes.Reader, shortHeaderConnIDLen int) error { - var err error - h.DestConnectionID, err = protocol.ReadConnectionID(b, shortHeaderConnIDLen) - return err + h := &Header{typeByte: typeByte} + err = h.parseLongHeader(b) + h.parsedLen = protocol.ByteCount(startLen - b.Len()) + return h, err } func (h *Header) parseLongHeader(b *bytes.Reader) error { @@ -321,8 +292,5 @@ func (h *Header) toExtendedHeader() *ExtendedHeader { // PacketType is the type of the packet, for logging purposes func (h *Header) PacketType() string { - if h.IsLongHeader { - return h.Type.String() - } - return "1-RTT" + return h.Type.String() } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go similarity index 68% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go rename to 
vendor/github.com/quic-go/quic-go/internal/wire/interface.go index dc717588..7e0f9a03 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/interface.go @@ -1,7 +1,7 @@ package wire import ( - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A Frame in QUIC @@ -12,6 +12,6 @@ type Frame interface { // A FrameParser parses QUIC frames, one by one. type FrameParser interface { - ParseNext([]byte, protocol.EncryptionLevel) (int, Frame, error) + ParseNext([]byte, protocol.EncryptionLevel, protocol.VersionNumber) (int, Frame, error) SetAckDelayExponent(uint8) } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go b/vendor/github.com/quic-go/quic-go/internal/wire/log.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go rename to vendor/github.com/quic-go/quic-go/internal/wire/log.go index 30cf9424..ec7d45d8 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/log.go @@ -4,8 +4,8 @@ import ( "fmt" "strings" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) // LogFrame logs a frame, either sent or received diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go index 36fb5029..427c8110 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_data_frame.go @@ -3,8 +3,8 @@ package wire 
import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A MaxDataFrame carries flow control information for the connection diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go index f0cd2a99..4218c09b 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_stream_data_frame.go @@ -3,8 +3,8 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A MaxStreamDataFrame is a MAX_STREAM_DATA frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go index 9601fafb..f417127c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/max_streams_frame.go @@ -4,8 +4,8 @@ import ( "bytes" "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A MaxStreamsFrame is a MAX_STREAMS frame diff --git 
a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go index 828cda3b..5f6ab998 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/new_connection_id_frame.go @@ -5,8 +5,8 @@ import ( "fmt" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A NewConnectionIDFrame is a NEW_CONNECTION_ID frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go index 4703f8ca..cc1d5819 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/new_token_frame.go @@ -5,8 +5,8 @@ import ( "errors" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A NewTokenFrame is a NEW_TOKEN frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go similarity index 93% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go rename to 
vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go index 6288c031..5d32865e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/path_challenge_frame.go @@ -4,7 +4,7 @@ import ( "bytes" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A PathChallengeFrame is a PATH_CHALLENGE frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go similarity index 93% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go index 59b6b87c..5c49e122 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/path_response_frame.go @@ -4,7 +4,7 @@ import ( "bytes" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A PathResponseFrame is a PATH_RESPONSE frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go similarity index 90% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go index 082ffa44..ba32d167 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/ping_frame.go @@ -3,7 +3,7 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // A PingFrame is a PING frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/pool.go 
b/vendor/github.com/quic-go/quic-go/internal/wire/pool.go similarity index 90% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/pool.go rename to vendor/github.com/quic-go/quic-go/internal/wire/pool.go index c057395e..18ab4379 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/pool.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/pool.go @@ -3,7 +3,7 @@ package wire import ( "sync" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) var pool sync.Pool diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go similarity index 90% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go index 4e98890a..46213813 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/reset_stream_frame.go @@ -3,9 +3,9 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/quicvarint" ) // A ResetStreamFrame is a RESET_STREAM frame in QUIC diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go similarity index 88% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go index 9e707a8c..3e4f58ac 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go 
+++ b/vendor/github.com/quic-go/quic-go/internal/wire/retire_connection_id_frame.go @@ -3,8 +3,8 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A RetireConnectionIDFrame is a RETIRE_CONNECTION_ID frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/short_header.go b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go similarity index 65% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/short_header.go rename to vendor/github.com/quic-go/quic-go/internal/wire/short_header.go index 9639b5d4..69aa8341 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/short_header.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/short_header.go @@ -5,10 +5,13 @@ import ( "fmt" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) +// ParseShortHeader parses a short header packet. +// It must be called after header protection was removed. +// Otherwise, the check for the reserved bits will (most likely) fail. func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.PacketNumber, _ protocol.PacketNumberLen, _ protocol.KeyPhaseBit, _ error) { if len(data) == 0 { return 0, 0, 0, 0, io.EOF @@ -50,6 +53,21 @@ func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.Packet return 1 + connIDLen + int(pnLen), pn, pnLen, kp, err } +// AppendShortHeader writes a short header. 
+func AppendShortHeader(b []byte, connID protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) ([]byte, error) { + typeByte := 0x40 | uint8(pnLen-1) + if kp == protocol.KeyPhaseOne { + typeByte |= byte(1 << 2) + } + b = append(b, typeByte) + b = append(b, connID.Bytes()...) + return appendPacketNumber(b, pn, pnLen) +} + +func ShortHeaderLen(dest protocol.ConnectionID, pnLen protocol.PacketNumberLen) protocol.ByteCount { + return 1 + protocol.ByteCount(dest.Len()) + protocol.ByteCount(pnLen) +} + func LogShortHeader(logger utils.Logger, dest protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) { logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", dest, pn, pnLen, kp) } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go similarity index 87% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go index bede9ec9..e47a0f4a 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stop_sending_frame.go @@ -3,9 +3,9 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/quicvarint" ) // A StopSendingFrame is a STOP_SENDING frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go similarity index 91% rename from 
vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go index f8061cf4..2d3fb07e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_data_blocked_frame.go @@ -3,8 +3,8 @@ package wire import ( "bytes" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A StreamDataBlockedFrame is a STREAM_DATA_BLOCKED frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go index 132d26aa..ebf3101c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/stream_frame.go @@ -5,8 +5,8 @@ import ( "errors" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A StreamFrame of QUIC diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go b/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go rename to vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go index 525d42ff..5e556cb8 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go +++ 
b/vendor/github.com/quic-go/quic-go/internal/wire/streams_blocked_frame.go @@ -4,8 +4,8 @@ import ( "bytes" "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/quicvarint" ) // A StreamsBlockedFrame is a STREAMS_BLOCKED frame diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go rename to vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go index 2ac37400..a64638cb 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/transport_parameters.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/transport_parameters.go @@ -11,10 +11,10 @@ import ( "sort" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/quicvarint" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/quicvarint" ) const transportParameterMarshalingVersion = 1 @@ -303,6 +303,9 @@ func (p *TransportParameters) readNumericTransportParameter( } p.MaxAckDelay = time.Duration(val) * time.Millisecond case activeConnectionIDLimitParameterID: + if val < 2 { + return fmt.Errorf("invalid value for active_connection_id_limit: %d (minimum 2)", val) + } p.ActiveConnectionIDLimit = val case maxDatagramFrameSizeParameterID: p.MaxDatagramFrameSize = protocol.ByteCount(val) diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go similarity index 94% rename 
from vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go rename to vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go index 2cfa2ca3..3dc62113 100644 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go +++ b/vendor/github.com/quic-go/quic-go/internal/wire/version_negotiation.go @@ -6,8 +6,8 @@ import ( "encoding/binary" "errors" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) // ParseVersionNegotiationPacket parses a Version Negotiation packet. diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/frame.go b/vendor/github.com/quic-go/quic-go/logging/frame.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/logging/frame.go rename to vendor/github.com/quic-go/quic-go/logging/frame.go index 75705092..9a055db3 100644 --- a/vendor/github.com/lucas-clemente/quic-go/logging/frame.go +++ b/vendor/github.com/quic-go/quic-go/logging/frame.go @@ -1,6 +1,6 @@ package logging -import "github.com/lucas-clemente/quic-go/internal/wire" +import "github.com/quic-go/quic-go/internal/wire" // A Frame is a QUIC frame type Frame interface{} diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/interface.go b/vendor/github.com/quic-go/quic-go/logging/interface.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/logging/interface.go rename to vendor/github.com/quic-go/quic-go/logging/interface.go index 9d086866..efcef151 100644 --- a/vendor/github.com/lucas-clemente/quic-go/logging/interface.go +++ b/vendor/github.com/quic-go/quic-go/logging/interface.go @@ -7,11 +7,10 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go/internal/utils" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - 
"github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type ( @@ -121,7 +120,8 @@ type ConnectionTracer interface { SentTransportParameters(*TransportParameters) ReceivedTransportParameters(*TransportParameters) RestoredTransportParameters(parameters *TransportParameters) // for 0-RTT - SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) + SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) + SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) ReceivedRetry(*Header) ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame) diff --git a/vendor/github.com/quic-go/quic-go/logging/mockgen.go b/vendor/github.com/quic-go/quic-go/logging/mockgen.go new file mode 100644 index 00000000..d5091679 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/logging/mockgen.go @@ -0,0 +1,4 @@ +package logging + +//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/quic-go/quic-go/logging -destination mock_connection_tracer_test.go github.com/quic-go/quic-go/logging ConnectionTracer" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package logging -self_package github.com/quic-go/quic-go/logging -destination mock_tracer_test.go github.com/quic-go/quic-go/logging Tracer" diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/multiplex.go b/vendor/github.com/quic-go/quic-go/logging/multiplex.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/logging/multiplex.go rename to vendor/github.com/quic-go/quic-go/logging/multiplex.go index d7166f1a..8e85db49 100644 --- 
a/vendor/github.com/lucas-clemente/quic-go/logging/multiplex.go +++ b/vendor/github.com/quic-go/quic-go/logging/multiplex.go @@ -104,9 +104,15 @@ func (m *connTracerMultiplexer) RestoredTransportParameters(tp *TransportParamet } } -func (m *connTracerMultiplexer) SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) { +func (m *connTracerMultiplexer) SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) { for _, t := range m.tracers { - t.SentPacket(hdr, size, ack, frames) + t.SentLongHeaderPacket(hdr, size, ack, frames) + } +} + +func (m *connTracerMultiplexer) SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame) { + for _, t := range m.tracers { + t.SentShortHeaderPacket(hdr, size, ack, frames) } } diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/null_tracer.go b/vendor/github.com/quic-go/quic-go/logging/null_tracer.go similarity index 90% rename from vendor/github.com/lucas-clemente/quic-go/logging/null_tracer.go rename to vendor/github.com/quic-go/quic-go/logging/null_tracer.go index 3103ae90..38052ae3 100644 --- a/vendor/github.com/lucas-clemente/quic-go/logging/null_tracer.go +++ b/vendor/github.com/quic-go/quic-go/logging/null_tracer.go @@ -31,11 +31,12 @@ func (n NullConnectionTracer) StartedConnection(local, remote net.Addr, srcConnI func (n NullConnectionTracer) NegotiatedVersion(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) { } -func (n NullConnectionTracer) ClosedConnection(err error) {} -func (n NullConnectionTracer) SentTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {} -func (n NullConnectionTracer) SentPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {} +func (n NullConnectionTracer) ClosedConnection(err error) {} +func (n NullConnectionTracer) 
SentTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters) {} +func (n NullConnectionTracer) SentLongHeaderPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {} +func (n NullConnectionTracer) SentShortHeaderPacket(*ShortHeader, ByteCount, *AckFrame, []Frame) {} func (n NullConnectionTracer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) { } func (n NullConnectionTracer) ReceivedRetry(*Header) {} diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/packet_header.go b/vendor/github.com/quic-go/quic-go/logging/packet_header.go similarity index 83% rename from vendor/github.com/lucas-clemente/quic-go/logging/packet_header.go rename to vendor/github.com/quic-go/quic-go/logging/packet_header.go index ea4282fe..6b8df58d 100644 --- a/vendor/github.com/lucas-clemente/quic-go/logging/packet_header.go +++ b/vendor/github.com/quic-go/quic-go/logging/packet_header.go @@ -1,14 +1,11 @@ package logging import ( - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // PacketTypeFromHeader determines the packet type from a *wire.Header. 
func PacketTypeFromHeader(hdr *Header) PacketType { - if !hdr.IsLongHeader { - return PacketType1RTT - } if hdr.Version == 0 { return PacketTypeVersionNegotiation } diff --git a/vendor/github.com/lucas-clemente/quic-go/logging/types.go b/vendor/github.com/quic-go/quic-go/logging/types.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/logging/types.go rename to vendor/github.com/quic-go/quic-go/logging/types.go diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen.go b/vendor/github.com/quic-go/quic-go/mockgen.go similarity index 50% rename from vendor/github.com/lucas-clemente/quic-go/mockgen.go rename to vendor/github.com/quic-go/quic-go/mockgen.go index 54e7d2aa..abe1faab 100644 --- a/vendor/github.com/lucas-clemente/quic-go/mockgen.go +++ b/vendor/github.com/quic-go/quic-go/mockgen.go @@ -1,27 +1,27 @@ package quic -//go:generate sh -c "./mockgen_private.sh quic mock_send_conn_test.go github.com/lucas-clemente/quic-go sendConn" -//go:generate sh -c "./mockgen_private.sh quic mock_sender_test.go github.com/lucas-clemente/quic-go sender" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_internal_test.go github.com/lucas-clemente/quic-go streamI" -//go:generate sh -c "./mockgen_private.sh quic mock_crypto_stream_test.go github.com/lucas-clemente/quic-go cryptoStream" -//go:generate sh -c "./mockgen_private.sh quic mock_receive_stream_internal_test.go github.com/lucas-clemente/quic-go receiveStreamI" -//go:generate sh -c "./mockgen_private.sh quic mock_send_stream_internal_test.go github.com/lucas-clemente/quic-go sendStreamI" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_sender_test.go github.com/lucas-clemente/quic-go streamSender" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_getter_test.go github.com/lucas-clemente/quic-go streamGetter" -//go:generate sh -c "./mockgen_private.sh quic mock_crypto_data_handler_test.go github.com/lucas-clemente/quic-go cryptoDataHandler" -//go:generate sh -c 
"./mockgen_private.sh quic mock_frame_source_test.go github.com/lucas-clemente/quic-go frameSource" -//go:generate sh -c "./mockgen_private.sh quic mock_ack_frame_source_test.go github.com/lucas-clemente/quic-go ackFrameSource" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_manager_test.go github.com/lucas-clemente/quic-go streamManager" -//go:generate sh -c "./mockgen_private.sh quic mock_sealing_manager_test.go github.com/lucas-clemente/quic-go sealingManager" -//go:generate sh -c "./mockgen_private.sh quic mock_unpacker_test.go github.com/lucas-clemente/quic-go unpacker" -//go:generate sh -c "./mockgen_private.sh quic mock_packer_test.go github.com/lucas-clemente/quic-go packer" -//go:generate sh -c "./mockgen_private.sh quic mock_mtu_discoverer_test.go github.com/lucas-clemente/quic-go mtuDiscoverer" -//go:generate sh -c "./mockgen_private.sh quic mock_conn_runner_test.go github.com/lucas-clemente/quic-go connRunner" -//go:generate sh -c "./mockgen_private.sh quic mock_quic_conn_test.go github.com/lucas-clemente/quic-go quicConn" -//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_test.go github.com/lucas-clemente/quic-go packetHandler" -//go:generate sh -c "./mockgen_private.sh quic mock_unknown_packet_handler_test.go github.com/lucas-clemente/quic-go unknownPacketHandler" -//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/lucas-clemente/quic-go packetHandlerManager" -//go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/lucas-clemente/quic-go multiplexer" -//go:generate sh -c "./mockgen_private.sh quic mock_batch_conn_test.go github.com/lucas-clemente/quic-go batchConn" -//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package github.com/lucas-clemente/quic-go -destination mock_token_store_test.go github.com/lucas-clemente/quic-go TokenStore" -//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package 
github.com/lucas-clemente/quic-go -destination mock_packetconn_test.go net PacketConn" +//go:generate sh -c "./mockgen_private.sh quic mock_send_conn_test.go github.com/quic-go/quic-go sendConn" +//go:generate sh -c "./mockgen_private.sh quic mock_sender_test.go github.com/quic-go/quic-go sender" +//go:generate sh -c "./mockgen_private.sh quic mock_stream_internal_test.go github.com/quic-go/quic-go streamI" +//go:generate sh -c "./mockgen_private.sh quic mock_crypto_stream_test.go github.com/quic-go/quic-go cryptoStream" +//go:generate sh -c "./mockgen_private.sh quic mock_receive_stream_internal_test.go github.com/quic-go/quic-go receiveStreamI" +//go:generate sh -c "./mockgen_private.sh quic mock_send_stream_internal_test.go github.com/quic-go/quic-go sendStreamI" +//go:generate sh -c "./mockgen_private.sh quic mock_stream_sender_test.go github.com/quic-go/quic-go streamSender" +//go:generate sh -c "./mockgen_private.sh quic mock_stream_getter_test.go github.com/quic-go/quic-go streamGetter" +//go:generate sh -c "./mockgen_private.sh quic mock_crypto_data_handler_test.go github.com/quic-go/quic-go cryptoDataHandler" +//go:generate sh -c "./mockgen_private.sh quic mock_frame_source_test.go github.com/quic-go/quic-go frameSource" +//go:generate sh -c "./mockgen_private.sh quic mock_ack_frame_source_test.go github.com/quic-go/quic-go ackFrameSource" +//go:generate sh -c "./mockgen_private.sh quic mock_stream_manager_test.go github.com/quic-go/quic-go streamManager" +//go:generate sh -c "./mockgen_private.sh quic mock_sealing_manager_test.go github.com/quic-go/quic-go sealingManager" +//go:generate sh -c "./mockgen_private.sh quic mock_unpacker_test.go github.com/quic-go/quic-go unpacker" +//go:generate sh -c "./mockgen_private.sh quic mock_packer_test.go github.com/quic-go/quic-go packer" +//go:generate sh -c "./mockgen_private.sh quic mock_mtu_discoverer_test.go github.com/quic-go/quic-go mtuDiscoverer" +//go:generate sh -c "./mockgen_private.sh quic 
mock_conn_runner_test.go github.com/quic-go/quic-go connRunner" +//go:generate sh -c "./mockgen_private.sh quic mock_quic_conn_test.go github.com/quic-go/quic-go quicConn" +//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_test.go github.com/quic-go/quic-go packetHandler" +//go:generate sh -c "./mockgen_private.sh quic mock_unknown_packet_handler_test.go github.com/quic-go/quic-go unknownPacketHandler" +//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/quic-go/quic-go packetHandlerManager" +//go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/quic-go/quic-go multiplexer" +//go:generate sh -c "./mockgen_private.sh quic mock_batch_conn_test.go github.com/quic-go/quic-go batchConn" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package github.com/quic-go/quic-go -destination mock_token_store_test.go github.com/quic-go/quic-go TokenStore" +//go:generate sh -c "go run github.com/golang/mock/mockgen -package quic -self_package github.com/quic-go/quic-go -destination mock_packetconn_test.go net PacketConn" diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh b/vendor/github.com/quic-go/quic-go/mockgen_private.sh similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh rename to vendor/github.com/quic-go/quic-go/mockgen_private.sh index bd255904..79f63eee 100644 --- a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh +++ b/vendor/github.com/quic-go/quic-go/mockgen_private.sh @@ -17,7 +17,7 @@ for f in *.go; do continue; fi if $(egrep -qe "type (.*) interface" $f); then - AUX+=("github.com/lucas-clemente/quic-go=$f") + AUX+=("github.com/quic-go/quic-go=$f") fi done diff --git a/vendor/github.com/lucas-clemente/quic-go/mtu_discoverer.go b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/mtu_discoverer.go rename 
to vendor/github.com/quic-go/quic-go/mtu_discoverer.go index bf38eaac..5a8484c7 100644 --- a/vendor/github.com/lucas-clemente/quic-go/mtu_discoverer.go +++ b/vendor/github.com/quic-go/quic-go/mtu_discoverer.go @@ -3,10 +3,10 @@ package quic import ( "time" - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type mtuDiscoverer interface { diff --git a/vendor/github.com/lucas-clemente/quic-go/multiplexer.go b/vendor/github.com/quic-go/quic-go/multiplexer.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/multiplexer.go rename to vendor/github.com/quic-go/quic-go/multiplexer.go index d1005039..37d4e75c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/multiplexer.go +++ b/vendor/github.com/quic-go/quic-go/multiplexer.go @@ -5,8 +5,8 @@ import ( "net" "sync" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" ) var ( diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go b/vendor/github.com/quic-go/quic-go/packet_handler_map.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go rename to vendor/github.com/quic-go/quic-go/packet_handler_map.go index 55f35b7c..e2bc913c 100644 --- a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go +++ b/vendor/github.com/quic-go/quic-go/packet_handler_map.go @@ -16,10 +16,10 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - 
"github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" ) // rawConn is a connection that allow reading of a receivedPacket. @@ -115,7 +115,7 @@ func newPacketHandlerMap( if disable, _ := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING")); disable { return } - log.Printf("%s. See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err) + log.Printf("%s. See https://github.com/quic-go/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err) }) } } @@ -365,7 +365,7 @@ func (h *packetHandlerMap) listen() { //nolint:staticcheck // SA1019 ignore this! // TODO: This code is used to ignore wsa errors on Windows. // Since net.Error.Temporary is deprecated as of Go 1.18, we should find a better solution. - // See https://github.com/lucas-clemente/quic-go/issues/1737 for details. + // See https://github.com/quic-go/quic-go/issues/1737 for details. 
if nerr, ok := err.(net.Error); ok && nerr.Temporary() { h.logger.Debugf("Temporary error reading from conn: %w", err) continue diff --git a/vendor/github.com/quic-go/quic-go/packet_packer.go b/vendor/github.com/quic-go/quic-go/packet_packer.go new file mode 100644 index 00000000..14befd46 --- /dev/null +++ b/vendor/github.com/quic-go/quic-go/packet_packer.go @@ -0,0 +1,968 @@ +package quic + +import ( + "errors" + "fmt" + "net" + "time" + + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" +) + +var errNothingToPack = errors.New("nothing to pack") + +type packer interface { + PackCoalescedPacket(onlyAck bool, v protocol.VersionNumber) (*coalescedPacket, error) + PackPacket(onlyAck bool, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) + MaybePackProbePacket(protocol.EncryptionLevel, protocol.VersionNumber) (*coalescedPacket, error) + PackConnectionClose(*qerr.TransportError, protocol.VersionNumber) (*coalescedPacket, error) + PackApplicationClose(*qerr.ApplicationError, protocol.VersionNumber) (*coalescedPacket, error) + + SetMaxPacketSize(protocol.ByteCount) + PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) + + HandleTransportParameters(*wire.TransportParameters) + SetToken([]byte) +} + +type sealer interface { + handshake.LongHeaderSealer +} + +type payload struct { + frames []*ackhandler.Frame + ack *wire.AckFrame + length protocol.ByteCount +} + +type longHeaderPacket struct { + header *wire.ExtendedHeader + ack *wire.AckFrame + frames []*ackhandler.Frame + + length protocol.ByteCount + + isMTUProbePacket bool +} + +type shortHeaderPacket struct { + *ackhandler.Packet + // used for logging + 
DestConnID protocol.ConnectionID + Ack *wire.AckFrame + PacketNumberLen protocol.PacketNumberLen + KeyPhase protocol.KeyPhaseBit +} + +func (p *shortHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.Frames) } + +type coalescedPacket struct { + buffer *packetBuffer + longHdrPackets []*longHeaderPacket + shortHdrPacket *shortHeaderPacket +} + +func (p *longHeaderPacket) EncryptionLevel() protocol.EncryptionLevel { + //nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data). + switch p.header.Type { + case protocol.PacketTypeInitial: + return protocol.EncryptionInitial + case protocol.PacketTypeHandshake: + return protocol.EncryptionHandshake + case protocol.PacketType0RTT: + return protocol.Encryption0RTT + default: + panic("can't determine encryption level") + } +} + +func (p *longHeaderPacket) IsAckEliciting() bool { return ackhandler.HasAckElicitingFrames(p.frames) } + +func (p *longHeaderPacket) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet { + largestAcked := protocol.InvalidPacketNumber + if p.ack != nil { + largestAcked = p.ack.LargestAcked() + } + encLevel := p.EncryptionLevel() + for i := range p.frames { + if p.frames[i].OnLost != nil { + continue + } + //nolint:exhaustive // Short header packets are handled separately. 
+ switch encLevel { + case protocol.EncryptionInitial: + p.frames[i].OnLost = q.AddInitial + case protocol.EncryptionHandshake: + p.frames[i].OnLost = q.AddHandshake + case protocol.Encryption0RTT: + p.frames[i].OnLost = q.AddAppData + } + } + + ap := ackhandler.GetPacket() + ap.PacketNumber = p.header.PacketNumber + ap.LargestAcked = largestAcked + ap.Frames = p.frames + ap.Length = p.length + ap.EncryptionLevel = encLevel + ap.SendTime = now + ap.IsPathMTUProbePacket = p.isMTUProbePacket + return ap +} + +func getMaxPacketSize(addr net.Addr) protocol.ByteCount { + maxSize := protocol.ByteCount(protocol.MinInitialPacketSize) + // If this is not a UDP address, we don't know anything about the MTU. + // Use the minimum size of an Initial packet as the max packet size. + if udpAddr, ok := addr.(*net.UDPAddr); ok { + if utils.IsIPv4(udpAddr.IP) { + maxSize = protocol.InitialPacketSizeIPv4 + } else { + maxSize = protocol.InitialPacketSizeIPv6 + } + } + return maxSize +} + +type packetNumberManager interface { + PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) + PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber +} + +type sealingManager interface { + GetInitialSealer() (handshake.LongHeaderSealer, error) + GetHandshakeSealer() (handshake.LongHeaderSealer, error) + Get0RTTSealer() (handshake.LongHeaderSealer, error) + Get1RTTSealer() (handshake.ShortHeaderSealer, error) +} + +type frameSource interface { + HasData() bool + AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) + AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) +} + +type ackFrameSource interface { + GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame +} + +type packetPacker struct { + srcConnID protocol.ConnectionID + getDestConnID func() protocol.ConnectionID + + 
perspective protocol.Perspective + cryptoSetup sealingManager + + initialStream cryptoStream + handshakeStream cryptoStream + + token []byte + + pnManager packetNumberManager + framer frameSource + acks ackFrameSource + datagramQueue *datagramQueue + retransmissionQueue *retransmissionQueue + + maxPacketSize protocol.ByteCount + numNonAckElicitingAcks int +} + +var _ packer = &packetPacker{} + +func newPacketPacker(srcConnID protocol.ConnectionID, getDestConnID func() protocol.ConnectionID, initialStream cryptoStream, handshakeStream cryptoStream, packetNumberManager packetNumberManager, retransmissionQueue *retransmissionQueue, remoteAddr net.Addr, cryptoSetup sealingManager, framer frameSource, acks ackFrameSource, datagramQueue *datagramQueue, perspective protocol.Perspective) *packetPacker { + return &packetPacker{ + cryptoSetup: cryptoSetup, + getDestConnID: getDestConnID, + srcConnID: srcConnID, + initialStream: initialStream, + handshakeStream: handshakeStream, + retransmissionQueue: retransmissionQueue, + datagramQueue: datagramQueue, + perspective: perspective, + framer: framer, + acks: acks, + pnManager: packetNumberManager, + maxPacketSize: getMaxPacketSize(remoteAddr), + } +} + +// PackConnectionClose packs a packet that closes the connection with a transport error. +func (p *packetPacker) PackConnectionClose(e *qerr.TransportError, v protocol.VersionNumber) (*coalescedPacket, error) { + var reason string + // don't send details of crypto errors + if !e.ErrorCode.IsCryptoError() { + reason = e.ErrorMessage + } + return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason, v) +} + +// PackApplicationClose packs a packet that closes the connection with an application error. 
+func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError, v protocol.VersionNumber) (*coalescedPacket, error) { + return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage, v) +} + +func (p *packetPacker) packConnectionClose( + isApplicationError bool, + errorCode uint64, + frameType uint64, + reason string, + v protocol.VersionNumber, +) (*coalescedPacket, error) { + var sealers [4]sealer + var hdrs [3]*wire.ExtendedHeader + var payloads [4]payload + var size protocol.ByteCount + var connID protocol.ConnectionID + var oneRTTPacketNumber protocol.PacketNumber + var oneRTTPacketNumberLen protocol.PacketNumberLen + var keyPhase protocol.KeyPhaseBit // only set for 1-RTT + var numLongHdrPackets uint8 + encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT} + for i, encLevel := range encLevels { + if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT { + continue + } + ccf := &wire.ConnectionCloseFrame{ + IsApplicationError: isApplicationError, + ErrorCode: errorCode, + FrameType: frameType, + ReasonPhrase: reason, + } + // don't send application errors in Initial or Handshake packets + if isApplicationError && (encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake) { + ccf.IsApplicationError = false + ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode) + ccf.ReasonPhrase = "" + } + pl := payload{ + frames: []*ackhandler.Frame{{Frame: ccf}}, + length: ccf.Length(v), + } + + var sealer sealer + var err error + switch encLevel { + case protocol.EncryptionInitial: + sealer, err = p.cryptoSetup.GetInitialSealer() + case protocol.EncryptionHandshake: + sealer, err = p.cryptoSetup.GetHandshakeSealer() + case protocol.Encryption0RTT: + sealer, err = p.cryptoSetup.Get0RTTSealer() + case protocol.Encryption1RTT: + var s handshake.ShortHeaderSealer + s, err = p.cryptoSetup.Get1RTTSealer() + if 
err == nil { + keyPhase = s.KeyPhase() + } + sealer = s + } + if err == handshake.ErrKeysNotYetAvailable || err == handshake.ErrKeysDropped { + continue + } + if err != nil { + return nil, err + } + sealers[i] = sealer + var hdr *wire.ExtendedHeader + if encLevel == protocol.Encryption1RTT { + connID = p.getDestConnID() + oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, pl) + } else { + hdr = p.getLongHeader(encLevel, v) + hdrs[i] = hdr + size += p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead()) + numLongHdrPackets++ + } + payloads[i] = pl + } + buffer := getPacketBuffer() + packet := &coalescedPacket{ + buffer: buffer, + longHdrPackets: make([]*longHeaderPacket, 0, numLongHdrPackets), + } + for i, encLevel := range encLevels { + if sealers[i] == nil { + continue + } + var paddingLen protocol.ByteCount + if encLevel == protocol.EncryptionInitial { + paddingLen = p.initialPaddingLen(payloads[i].frames, size) + } + if encLevel == protocol.Encryption1RTT { + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, keyPhase, payloads[i], paddingLen, sealers[i], false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: oneRTTPacketNumberLen, + KeyPhase: keyPhase, + } + } else { + longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) + } + } + return packet, nil +} + +// longHeaderPacketLength calculates the length of a serialized long header packet. 
+// It takes into account that packets that have a tiny payload need to be padded, +// such that len(payload) + packet number len >= 4 + AEAD overhead +func (p *packetPacker) longHeaderPacketLength(hdr *wire.ExtendedHeader, pl payload, v protocol.VersionNumber) protocol.ByteCount { + var paddingLen protocol.ByteCount + pnLen := protocol.ByteCount(hdr.PacketNumberLen) + if pl.length < 4-pnLen { + paddingLen = 4 - pnLen - pl.length + } + return hdr.GetLength(v) + pl.length + paddingLen +} + +// shortHeaderPacketLength calculates the length of a serialized short header packet. +// It takes into account that packets that have a tiny payload need to be padded, +// such that len(payload) + packet number len >= 4 + AEAD overhead +func (p *packetPacker) shortHeaderPacketLength(connID protocol.ConnectionID, pnLen protocol.PacketNumberLen, pl payload) protocol.ByteCount { + var paddingLen protocol.ByteCount + if pl.length < 4-protocol.ByteCount(pnLen) { + paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length + } + return wire.ShortHeaderLen(connID, pnLen) + pl.length + paddingLen +} + +// size is the expected size of the packet, if no padding was applied. +func (p *packetPacker) initialPaddingLen(frames []*ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount { + // For the server, only ack-eliciting Initial packets need to be padded. + if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) { + return 0 + } + if size >= p.maxPacketSize { + return 0 + } + return p.maxPacketSize - size +} + +// PackCoalescedPacket packs a new packet. +// It packs an Initial / Handshake if there is data to send in these packet number spaces. +// It should only be called before the handshake is confirmed. 
+func (p *packetPacker) PackCoalescedPacket(onlyAck bool, v protocol.VersionNumber) (*coalescedPacket, error) { + maxPacketSize := p.maxPacketSize + if p.perspective == protocol.PerspectiveClient { + maxPacketSize = protocol.MinInitialPacketSize + } + var ( + initialHdr, handshakeHdr, zeroRTTHdr *wire.ExtendedHeader + initialPayload, handshakePayload, zeroRTTPayload, oneRTTPayload payload + oneRTTPacketNumber protocol.PacketNumber + oneRTTPacketNumberLen protocol.PacketNumberLen + ) + // Try packing an Initial packet. + initialSealer, err := p.cryptoSetup.GetInitialSealer() + if err != nil && err != handshake.ErrKeysDropped { + return nil, err + } + var size protocol.ByteCount + if initialSealer != nil { + initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), protocol.EncryptionInitial, onlyAck, true, v) + if initialPayload.length > 0 { + size += p.longHeaderPacketLength(initialHdr, initialPayload, v) + protocol.ByteCount(initialSealer.Overhead()) + } + } + + // Add a Handshake packet. + var handshakeSealer sealer + if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + var err error + handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if handshakeSealer != nil { + handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), protocol.EncryptionHandshake, onlyAck, size == 0, v) + if handshakePayload.length > 0 { + s := p.longHeaderPacketLength(handshakeHdr, handshakePayload, v) + protocol.ByteCount(handshakeSealer.Overhead()) + size += s + } + } + } + + // Add a 0-RTT / 1-RTT packet. 
+ var zeroRTTSealer sealer + var oneRTTSealer handshake.ShortHeaderSealer + var connID protocol.ConnectionID + var kp protocol.KeyPhaseBit + if (onlyAck && size == 0) || (!onlyAck && size < maxPacketSize-protocol.MinCoalescedPacketSize) { + var err error + oneRTTSealer, err = p.cryptoSetup.Get1RTTSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if err == nil { // 1-RTT + kp = oneRTTSealer.KeyPhase() + connID = p.getDestConnID() + oneRTTPacketNumber, oneRTTPacketNumberLen = p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := wire.ShortHeaderLen(connID, oneRTTPacketNumberLen) + oneRTTPayload = p.maybeGetShortHeaderPacket(oneRTTSealer, hdrLen, maxPacketSize-size, onlyAck, size == 0, v) + if oneRTTPayload.length > 0 { + size += p.shortHeaderPacketLength(connID, oneRTTPacketNumberLen, oneRTTPayload) + protocol.ByteCount(oneRTTSealer.Overhead()) + } + } else if p.perspective == protocol.PerspectiveClient { // 0-RTT + var err error + zeroRTTSealer, err = p.cryptoSetup.Get0RTTSealer() + if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable { + return nil, err + } + if zeroRTTSealer != nil { + zeroRTTHdr, zeroRTTPayload = p.maybeGetAppDataPacketFor0RTT(zeroRTTSealer, maxPacketSize-size, v) + if zeroRTTPayload.length > 0 { + size += p.longHeaderPacketLength(zeroRTTHdr, zeroRTTPayload, v) + protocol.ByteCount(zeroRTTSealer.Overhead()) + } + } + } + } + + if initialPayload.length == 0 && handshakePayload.length == 0 && zeroRTTPayload.length == 0 && oneRTTPayload.length == 0 { + return nil, nil + } + + buffer := getPacketBuffer() + packet := &coalescedPacket{ + buffer: buffer, + longHdrPackets: make([]*longHeaderPacket, 0, 3), + } + if initialPayload.length > 0 { + padding := p.initialPaddingLen(initialPayload.frames, size) + cont, err := p.appendLongHeaderPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, 
initialSealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, cont) + } + if handshakePayload.length > 0 { + cont, err := p.appendLongHeaderPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, cont) + } + if zeroRTTPayload.length > 0 { + longHdrPacket, err := p.appendLongHeaderPacket(buffer, zeroRTTHdr, zeroRTTPayload, 0, protocol.Encryption0RTT, zeroRTTSealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = append(packet.longHdrPackets, longHdrPacket) + } else if oneRTTPayload.length > 0 { + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, oneRTTPacketNumber, oneRTTPacketNumberLen, kp, oneRTTPayload, 0, oneRTTSealer, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: oneRTTPacketNumberLen, + KeyPhase: kp, + } + } + return packet, nil +} + +// PackPacket packs a packet in the application data packet number space. +// It should be called after the handshake is confirmed. 
+func (p *packetPacker) PackPacket(onlyAck bool, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) { + sealer, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return shortHeaderPacket{}, nil, err + } + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + connID := p.getDestConnID() + hdrLen := wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetShortHeaderPacket(sealer, hdrLen, p.maxPacketSize, onlyAck, true, v) + if pl.length == 0 { + return shortHeaderPacket{}, nil, errNothingToPack + } + kp := sealer.KeyPhase() + buffer := getPacketBuffer() + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, sealer, false, v) + if err != nil { + return shortHeaderPacket{}, nil, err + } + return shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, buffer, nil +} + +func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize protocol.ByteCount, encLevel protocol.EncryptionLevel, onlyAck, ackAllowed bool, v protocol.VersionNumber) (*wire.ExtendedHeader, payload) { + if onlyAck { + if ack := p.acks.GetAckFrame(encLevel, true); ack != nil { + return p.getLongHeader(encLevel, v), payload{ + ack: ack, + length: ack.Length(v), + } + } + return nil, payload{} + } + + var s cryptoStream + var hasRetransmission bool + //nolint:exhaustive // Initial and Handshake are the only two encryption levels here. 
+ switch encLevel { + case protocol.EncryptionInitial: + s = p.initialStream + hasRetransmission = p.retransmissionQueue.HasInitialData() + case protocol.EncryptionHandshake: + s = p.handshakeStream + hasRetransmission = p.retransmissionQueue.HasHandshakeData() + } + + hasData := s.HasData() + var ack *wire.AckFrame + if ackAllowed { + ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData) + } + if !hasData && !hasRetransmission && ack == nil { + // nothing to send + return nil, payload{} + } + + var pl payload + if ack != nil { + pl.ack = ack + pl.length = ack.Length(v) + maxPacketSize -= pl.length + } + hdr := p.getLongHeader(encLevel, v) + maxPacketSize -= hdr.GetLength(v) + if hasRetransmission { + for { + var f wire.Frame + //nolint:exhaustive // 0-RTT packets can't contain any retransmission.s + switch encLevel { + case protocol.EncryptionInitial: + f = p.retransmissionQueue.GetInitialFrame(maxPacketSize, v) + case protocol.EncryptionHandshake: + f = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize, v) + } + if f == nil { + break + } + af := ackhandler.GetFrame() + af.Frame = f + pl.frames = append(pl.frames, af) + frameLen := f.Length(v) + pl.length += frameLen + maxPacketSize -= frameLen + } + } else if s.HasData() { + cf := s.PopCryptoFrame(maxPacketSize) + pl.frames = []*ackhandler.Frame{{Frame: cf}} + pl.length += cf.Length(v) + } + return hdr, pl +} + +func (p *packetPacker) maybeGetAppDataPacketFor0RTT(sealer sealer, maxPacketSize protocol.ByteCount, v protocol.VersionNumber) (*wire.ExtendedHeader, payload) { + if p.perspective != protocol.PerspectiveClient { + return nil, payload{} + } + + hdr := p.getLongHeader(protocol.Encryption0RTT, v) + maxPayloadSize := maxPacketSize - hdr.GetLength(v) - protocol.ByteCount(sealer.Overhead()) + return hdr, p.maybeGetAppDataPacket(maxPayloadSize, false, false, v) +} + +func (p *packetPacker) maybeGetShortHeaderPacket(sealer handshake.ShortHeaderSealer, hdrLen protocol.ByteCount, maxPacketSize 
protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload { + maxPayloadSize := maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead()) + return p.maybeGetAppDataPacket(maxPayloadSize, onlyAck, ackAllowed, v) +} + +func (p *packetPacker) maybeGetAppDataPacket(maxPayloadSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload { + pl := p.composeNextPacket(maxPayloadSize, onlyAck, ackAllowed, v) + + // check if we have anything to send + if len(pl.frames) == 0 { + if pl.ack == nil { + return payload{} + } + // the packet only contains an ACK + if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks { + ping := &wire.PingFrame{} + // don't retransmit the PING frame when it is lost + af := ackhandler.GetFrame() + af.Frame = ping + af.OnLost = func(wire.Frame) {} + pl.frames = append(pl.frames, af) + pl.length += ping.Length(v) + p.numNonAckElicitingAcks = 0 + } else { + p.numNonAckElicitingAcks++ + } + } else { + p.numNonAckElicitingAcks = 0 + } + return pl +} + +func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, onlyAck, ackAllowed bool, v protocol.VersionNumber) payload { + if onlyAck { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, true); ack != nil { + return payload{ + ack: ack, + length: ack.Length(v), + } + } + return payload{} + } + + pl := payload{frames: make([]*ackhandler.Frame, 0, 1)} + + hasData := p.framer.HasData() + hasRetransmission := p.retransmissionQueue.HasAppData() + + var hasAck bool + if ackAllowed { + if ack := p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData); ack != nil { + pl.ack = ack + pl.length += ack.Length(v) + hasAck = true + } + } + + if p.datagramQueue != nil { + if f := p.datagramQueue.Peek(); f != nil { + size := f.Length(v) + if size <= maxFrameSize-pl.length { + af := ackhandler.GetFrame() + af.Frame = f + // set it to a no-op. Then we won't set the default callback, which would retransmit the frame. 
+ af.OnLost = func(wire.Frame) {} + pl.frames = append(pl.frames, af) + pl.length += size + p.datagramQueue.Pop() + } + } + } + + if hasAck && !hasData && !hasRetransmission { + return pl + } + + if hasRetransmission { + for { + remainingLen := maxFrameSize - pl.length + if remainingLen < protocol.MinStreamFrameSize { + break + } + f := p.retransmissionQueue.GetAppDataFrame(remainingLen, v) + if f == nil { + break + } + af := ackhandler.GetFrame() + af.Frame = f + pl.frames = append(pl.frames, af) + pl.length += f.Length(v) + } + } + + if hasData { + var lengthAdded protocol.ByteCount + pl.frames, lengthAdded = p.framer.AppendControlFrames(pl.frames, maxFrameSize-pl.length, v) + pl.length += lengthAdded + + pl.frames, lengthAdded = p.framer.AppendStreamFrames(pl.frames, maxFrameSize-pl.length, v) + pl.length += lengthAdded + } + return pl +} + +func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (*coalescedPacket, error) { + if encLevel == protocol.Encryption1RTT { + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return nil, err + } + kp := s.KeyPhase() + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + hdrLen := wire.ShortHeaderLen(connID, pnLen) + pl := p.maybeGetAppDataPacket(p.maxPacketSize-protocol.ByteCount(s.Overhead())-hdrLen, false, true, v) + if pl.length == 0 { + return nil, nil + } + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, 0, s, false, v) + if err != nil { + return nil, err + } + packet.shortHdrPacket = &shortHeaderPacket{ + Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: pnLen, + KeyPhase: kp, + } + return packet, nil + } + + var hdr *wire.ExtendedHeader + var pl payload + var sealer handshake.LongHeaderSealer + //nolint:exhaustive // Probe packets are never sent for 0-RTT. 
+ switch encLevel { + case protocol.EncryptionInitial: + var err error + sealer, err = p.cryptoSetup.GetInitialSealer() + if err != nil { + return nil, err + } + hdr, pl = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionInitial, false, true, v) + case protocol.EncryptionHandshake: + var err error + sealer, err = p.cryptoSetup.GetHandshakeSealer() + if err != nil { + return nil, err + } + hdr, pl = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), protocol.EncryptionHandshake, false, true, v) + default: + panic("unknown encryption level") + } + + if pl.length == 0 { + return nil, nil + } + buffer := getPacketBuffer() + packet := &coalescedPacket{buffer: buffer} + size := p.longHeaderPacketLength(hdr, pl, v) + protocol.ByteCount(sealer.Overhead()) + var padding protocol.ByteCount + if encLevel == protocol.EncryptionInitial { + padding = p.initialPaddingLen(pl.frames, size) + } + + longHdrPacket, err := p.appendLongHeaderPacket(buffer, hdr, pl, padding, encLevel, sealer, v) + if err != nil { + return nil, err + } + packet.longHdrPackets = []*longHeaderPacket{longHdrPacket} + return packet, nil +} + +func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount, now time.Time, v protocol.VersionNumber) (shortHeaderPacket, *packetBuffer, error) { + pl := payload{ + frames: []*ackhandler.Frame{&ping}, + length: ping.Length(v), + } + buffer := getPacketBuffer() + s, err := p.cryptoSetup.Get1RTTSealer() + if err != nil { + return shortHeaderPacket{}, nil, err + } + connID := p.getDestConnID() + pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT) + padding := size - p.shortHeaderPacketLength(connID, pnLen, pl) - protocol.ByteCount(s.Overhead()) + kp := s.KeyPhase() + ap, ack, err := p.appendShortHeaderPacket(buffer, connID, pn, pnLen, kp, pl, padding, s, true, v) + if err != nil { + return shortHeaderPacket{}, nil, err + } + return shortHeaderPacket{ + 
Packet: ap, + DestConnID: connID, + Ack: ack, + PacketNumberLen: pnLen, + KeyPhase: kp, + }, buffer, nil +} + +func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel, v protocol.VersionNumber) *wire.ExtendedHeader { + pn, pnLen := p.pnManager.PeekPacketNumber(encLevel) + hdr := &wire.ExtendedHeader{ + PacketNumber: pn, + PacketNumberLen: pnLen, + } + hdr.Version = v + hdr.SrcConnectionID = p.srcConnID + hdr.DestConnectionID = p.getDestConnID() + + //nolint:exhaustive // 1-RTT packets are not long header packets. + switch encLevel { + case protocol.EncryptionInitial: + hdr.Type = protocol.PacketTypeInitial + hdr.Token = p.token + case protocol.EncryptionHandshake: + hdr.Type = protocol.PacketTypeHandshake + case protocol.Encryption0RTT: + hdr.Type = protocol.PacketType0RTT + } + return hdr +} + +func (p *packetPacker) appendLongHeaderPacket(buffer *packetBuffer, header *wire.ExtendedHeader, pl payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, v protocol.VersionNumber) (*longHeaderPacket, error) { + var paddingLen protocol.ByteCount + pnLen := protocol.ByteCount(header.PacketNumberLen) + if pl.length < 4-pnLen { + paddingLen = 4 - pnLen - pl.length + } + paddingLen += padding + header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + pl.length + paddingLen + + startLen := len(buffer.Data) + raw := buffer.Data[startLen:] + raw, err := header.Append(raw, v) + if err != nil { + return nil, err + } + payloadOffset := protocol.ByteCount(len(raw)) + + pn := p.pnManager.PopPacketNumber(encLevel) + if pn != header.PacketNumber { + return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") + } + + raw, err = p.appendPacketPayload(raw, pl, paddingLen, v) + if err != nil { + return nil, err + } + raw = p.encryptPacket(raw, sealer, pn, payloadOffset, pnLen) + buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)] + + return &longHeaderPacket{ + header: header, + ack: pl.ack, + frames: 
pl.frames, + length: protocol.ByteCount(len(raw)), + }, nil +} + +func (p *packetPacker) appendShortHeaderPacket( + buffer *packetBuffer, + connID protocol.ConnectionID, + pn protocol.PacketNumber, + pnLen protocol.PacketNumberLen, + kp protocol.KeyPhaseBit, + pl payload, + padding protocol.ByteCount, + sealer sealer, + isMTUProbePacket bool, + v protocol.VersionNumber, +) (*ackhandler.Packet, *wire.AckFrame, error) { + var paddingLen protocol.ByteCount + if pl.length < 4-protocol.ByteCount(pnLen) { + paddingLen = 4 - protocol.ByteCount(pnLen) - pl.length + } + paddingLen += padding + + startLen := len(buffer.Data) + raw := buffer.Data[startLen:] + raw, err := wire.AppendShortHeader(raw, connID, pn, pnLen, kp) + if err != nil { + return nil, nil, err + } + payloadOffset := protocol.ByteCount(len(raw)) + + if pn != p.pnManager.PopPacketNumber(protocol.Encryption1RTT) { + return nil, nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") + } + + raw, err = p.appendPacketPayload(raw, pl, paddingLen, v) + if err != nil { + return nil, nil, err + } + if !isMTUProbePacket { + if size := protocol.ByteCount(len(raw) + sealer.Overhead()); size > p.maxPacketSize { + return nil, nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize) + } + } + raw = p.encryptPacket(raw, sealer, pn, payloadOffset, protocol.ByteCount(pnLen)) + buffer.Data = buffer.Data[:len(buffer.Data)+len(raw)] + + // create the ackhandler.Packet + largestAcked := protocol.InvalidPacketNumber + if pl.ack != nil { + largestAcked = pl.ack.LargestAcked() + } + for i := range pl.frames { + if pl.frames[i].OnLost != nil { + continue + } + pl.frames[i].OnLost = p.retransmissionQueue.AddAppData + } + + ap := ackhandler.GetPacket() + ap.PacketNumber = pn + ap.LargestAcked = largestAcked + ap.Frames = pl.frames + ap.Length = protocol.ByteCount(len(raw)) + ap.EncryptionLevel = protocol.Encryption1RTT + ap.SendTime = time.Now() + 
ap.IsPathMTUProbePacket = isMTUProbePacket + + return ap, pl.ack, nil +} + +func (p *packetPacker) appendPacketPayload(raw []byte, pl payload, paddingLen protocol.ByteCount, v protocol.VersionNumber) ([]byte, error) { + payloadOffset := len(raw) + if pl.ack != nil { + var err error + raw, err = pl.ack.Append(raw, v) + if err != nil { + return nil, err + } + } + if paddingLen > 0 { + raw = append(raw, make([]byte, paddingLen)...) + } + for _, frame := range pl.frames { + var err error + raw, err = frame.Append(raw, v) + if err != nil { + return nil, err + } + } + + if payloadSize := protocol.ByteCount(len(raw)-payloadOffset) - paddingLen; payloadSize != pl.length { + return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", pl.length, payloadSize) + } + return raw, nil +} + +func (p *packetPacker) encryptPacket(raw []byte, sealer sealer, pn protocol.PacketNumber, payloadOffset, pnLen protocol.ByteCount) []byte { + _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], pn, raw[:payloadOffset]) + raw = raw[:len(raw)+sealer.Overhead()] + // apply header protection + pnOffset := payloadOffset - pnLen + sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[0], raw[pnOffset:payloadOffset]) + return raw +} + +func (p *packetPacker) SetToken(token []byte) { + p.token = token +} + +// When a higher MTU is discovered, use it. +func (p *packetPacker) SetMaxPacketSize(s protocol.ByteCount) { + p.maxPacketSize = s +} + +// If the peer sets a max_packet_size that's smaller than the size we're currently using, +// we need to reduce the size of packets we send. 
+func (p *packetPacker) HandleTransportParameters(params *wire.TransportParameters) { + if params.MaxUDPPayloadSize != 0 { + p.maxPacketSize = utils.Min(p.maxPacketSize, params.MaxUDPPayloadSize) + } +} diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go b/vendor/github.com/quic-go/quic-go/packet_unpacker.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go rename to vendor/github.com/quic-go/quic-go/packet_unpacker.go index e7754145..103524c7 100644 --- a/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go +++ b/vendor/github.com/quic-go/quic-go/packet_unpacker.go @@ -5,10 +5,10 @@ import ( "fmt" "time" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/wire" ) type headerDecryptor interface { @@ -38,16 +38,14 @@ type packetUnpacker struct { cs handshake.CryptoSetup shortHdrConnIDLen int - version protocol.VersionNumber } var _ unpacker = &packetUnpacker{} -func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version protocol.VersionNumber) unpacker { +func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetUnpacker { return &packetUnpacker{ cs: cs, shortHdrConnIDLen: shortHdrConnIDLen, - version: version, } } @@ -55,7 +53,7 @@ func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version // If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits. // If any other error occurred when parsing the header, the error is of type headerParseError. // If decrypting the payload fails for any reason, the error is the error returned by the AEAD. 
-func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) { +func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error) { var encLevel protocol.EncryptionLevel var extHdr *wire.ExtendedHeader var decrypted []byte @@ -67,7 +65,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) if err != nil { return nil, err } @@ -77,7 +75,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) if err != nil { return nil, err } @@ -87,7 +85,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d if err != nil { return nil, err } - extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data) + extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v) if err != nil { return nil, err } @@ -127,8 +125,8 @@ func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (prot return pn, pnLen, kp, decrypted, nil } -func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) { - extHdr, parseErr := u.unpackLongHeader(opener, hdr, data) +func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, []byte, error) { + extHdr, parseErr := u.unpackLongHeader(opener, hdr, data, v) // If the reserved bits are set incorrectly, we still need to continue unpacking. 
// This avoids a timing side-channel, which otherwise might allow an attacker // to gain information about the header encryption. @@ -189,15 +187,15 @@ func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int } // The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError. -func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) { - extHdr, err := unpackLongHeader(hd, hdr, data, u.version) +func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) { + extHdr, err := unpackLongHeader(hd, hdr, data, v) if err != nil && err != wire.ErrInvalidReservedBits { return nil, &headerParseError{err: err} } return extHdr, err } -func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version protocol.VersionNumber) (*wire.ExtendedHeader, error) { +func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) { r := bytes.NewReader(data) hdrLen := hdr.ParsedLen() @@ -216,7 +214,7 @@ func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version data[hdrLen:hdrLen+4], ) // 3. 
parse the header (and learn the actual length of the packet number) - extHdr, parseErr := hdr.ParseExtended(r, version) + extHdr, parseErr := hdr.ParseExtended(r, v) if parseErr != nil && parseErr != wire.ErrInvalidReservedBits { return nil, parseErr } diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/event.go b/vendor/github.com/quic-go/quic-go/qlog/event.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/qlog/event.go rename to vendor/github.com/quic-go/quic-go/qlog/event.go index f6799652..fbbdf1ac 100644 --- a/vendor/github.com/lucas-clemente/quic-go/qlog/event.go +++ b/vendor/github.com/quic-go/quic-go/qlog/event.go @@ -6,10 +6,10 @@ import ( "net" "time" - "github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/logging" "github.com/francoispqt/gojay" ) @@ -154,7 +154,7 @@ func (e eventConnectionClosed) MarshalJSONObject(enc *gojay.Encoder) { } type eventPacketSent struct { - Header packetHeader + Header gojay.MarshalerJSONObject // either a shortHeader or a packetHeader Length logging.ByteCount PayloadLength logging.ByteCount Frames frames diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/frame.go b/vendor/github.com/quic-go/quic-go/qlog/frame.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/qlog/frame.go rename to vendor/github.com/quic-go/quic-go/qlog/frame.go index 35761dae..0d44f073 100644 --- a/vendor/github.com/lucas-clemente/quic-go/qlog/frame.go +++ b/vendor/github.com/quic-go/quic-go/qlog/frame.go @@ -3,8 +3,8 @@ package qlog import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/wire" + 
"github.com/quic-go/quic-go/logging" "github.com/francoispqt/gojay" ) diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/packet_header.go b/vendor/github.com/quic-go/quic-go/qlog/packet_header.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/qlog/packet_header.go rename to vendor/github.com/quic-go/quic-go/qlog/packet_header.go index 2e5ca530..106499b0 100644 --- a/vendor/github.com/lucas-clemente/quic-go/qlog/packet_header.go +++ b/vendor/github.com/quic-go/quic-go/qlog/packet_header.go @@ -3,8 +3,8 @@ package qlog import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/logging" "github.com/francoispqt/gojay" ) diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/qlog.go b/vendor/github.com/quic-go/quic-go/qlog/qlog.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/qlog/qlog.go rename to vendor/github.com/quic-go/quic-go/qlog/qlog.go index 7c74590f..bc2bb233 100644 --- a/vendor/github.com/lucas-clemente/quic-go/qlog/qlog.go +++ b/vendor/github.com/quic-go/quic-go/qlog/qlog.go @@ -11,17 +11,17 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" "github.com/francoispqt/gojay" ) // Setting of this only works when quic-go is used as a library. 
// When building a binary from this repository, the version can be set using the following go build flag: -// -ldflags="-X github.com/lucas-clemente/quic-go/qlog.quicGoVersion=foobar" +// -ldflags="-X github.com/quic-go/quic-go/qlog.quicGoVersion=foobar" var quicGoVersion = "(devel)" func init() { @@ -33,7 +33,7 @@ func init() { return } for _, d := range info.Deps { - if d.Path == "github.com/lucas-clemente/quic-go" { + if d.Path == "github.com/quic-go/quic-go" { quicGoVersion = d.Version if d.Replace != nil { if len(d.Replace.Version) > 0 { @@ -274,7 +274,15 @@ func (t *connectionTracer) toTransportParameters(tp *wire.TransportParameters) * } } -func (t *connectionTracer) SentPacket(hdr *wire.ExtendedHeader, packetSize logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) { +func (t *connectionTracer) SentLongHeaderPacket(hdr *logging.ExtendedHeader, packetSize logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) { + t.sentPacket(*transformLongHeader(hdr), packetSize, hdr.Length, ack, frames) +} + +func (t *connectionTracer) SentShortHeaderPacket(hdr *logging.ShortHeader, packetSize logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) { + t.sentPacket(*transformShortHeader(hdr), packetSize, 0, ack, frames) +} + +func (t *connectionTracer) sentPacket(hdr gojay.MarshalerJSONObject, packetSize, payloadLen logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) { numFrames := len(frames) if ack != nil { numFrames++ @@ -286,12 +294,11 @@ func (t *connectionTracer) SentPacket(hdr *wire.ExtendedHeader, packetSize loggi for _, f := range frames { fs = append(fs, frame{Frame: f}) } - header := *transformLongHeader(hdr) t.mutex.Lock() t.recordEvent(time.Now(), &eventPacketSent{ - Header: header, + Header: hdr, Length: packetSize, - PayloadLength: hdr.Length, + PayloadLength: payloadLen, Frames: fs, }) t.mutex.Unlock() @@ -319,12 +326,11 @@ func (t *connectionTracer) ReceivedShortHeaderPacket(hdr *logging.ShortHeader, p fs[i] 
= frame{Frame: f} } header := *transformShortHeader(hdr) - hdrLen := 1 + hdr.DestConnectionID.Len() + int(hdr.PacketNumberLen) t.mutex.Lock() t.recordEvent(time.Now(), &eventPacketReceived{ Header: header, Length: packetSize, - PayloadLength: packetSize - protocol.ByteCount(hdrLen), + PayloadLength: packetSize - wire.ShortHeaderLen(hdr.DestConnectionID, hdr.PacketNumberLen), Frames: fs, }) t.mutex.Unlock() diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/trace.go b/vendor/github.com/quic-go/quic-go/qlog/trace.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/qlog/trace.go rename to vendor/github.com/quic-go/quic-go/qlog/trace.go index cf61558a..fbd7e739 100644 --- a/vendor/github.com/lucas-clemente/quic-go/qlog/trace.go +++ b/vendor/github.com/quic-go/quic-go/qlog/trace.go @@ -3,11 +3,10 @@ package qlog import ( "time" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/logging" "github.com/francoispqt/gojay" - - "github.com/lucas-clemente/quic-go/internal/protocol" ) type topLevel struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/qlog/types.go b/vendor/github.com/quic-go/quic-go/qlog/types.go similarity index 97% rename from vendor/github.com/lucas-clemente/quic-go/qlog/types.go rename to vendor/github.com/quic-go/quic-go/qlog/types.go index 42e562f9..c47ad481 100644 --- a/vendor/github.com/lucas-clemente/quic-go/qlog/types.go +++ b/vendor/github.com/quic-go/quic-go/qlog/types.go @@ -3,9 +3,9 @@ package qlog import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/logging" ) type owner uint8 diff --git a/vendor/github.com/lucas-clemente/quic-go/quicvarint/io.go 
b/vendor/github.com/quic-go/quic-go/quicvarint/io.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/quicvarint/io.go rename to vendor/github.com/quic-go/quic-go/quicvarint/io.go diff --git a/vendor/github.com/lucas-clemente/quic-go/quicvarint/varint.go b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go similarity index 88% rename from vendor/github.com/lucas-clemente/quic-go/quicvarint/varint.go rename to vendor/github.com/quic-go/quic-go/quicvarint/varint.go index 28dcaa93..cbebfe61 100644 --- a/vendor/github.com/lucas-clemente/quic-go/quicvarint/varint.go +++ b/vendor/github.com/quic-go/quic-go/quicvarint/varint.go @@ -4,7 +4,7 @@ import ( "fmt" "io" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) // taken from the QUIC draft @@ -71,6 +71,7 @@ func Read(r io.ByteReader) (uint64, error) { } // Write writes i in the QUIC varint format to w. +// Deprecated: use Append instead. func Write(w Writer, i uint64) { if i <= maxVarInt1 { w.WriteByte(uint8(i)) @@ -88,6 +89,7 @@ func Write(w Writer, i uint64) { } } +// Append appends i in the QUIC varint format. func Append(b []byte, i uint64) []byte { if i <= maxVarInt1 { return append(b, uint8(i)) @@ -107,32 +109,32 @@ func Append(b []byte, i uint64) []byte { panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i)) } -// WriteWithLen writes i in the QUIC varint format with the desired length to w. -func WriteWithLen(w Writer, i uint64, length protocol.ByteCount) { +// AppendWithLen append i in the QUIC varint format with the desired length. 
+func AppendWithLen(b []byte, i uint64, length protocol.ByteCount) []byte { if length != 1 && length != 2 && length != 4 && length != 8 { panic("invalid varint length") } l := Len(i) if l == length { - Write(w, i) - return + return Append(b, i) } if l > length { panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length)) } if length == 2 { - w.WriteByte(0b01000000) + b = append(b, 0b01000000) } else if length == 4 { - w.WriteByte(0b10000000) + b = append(b, 0b10000000) } else if length == 8 { - w.WriteByte(0b11000000) + b = append(b, 0b11000000) } for j := protocol.ByteCount(1); j < length-l; j++ { - w.WriteByte(0) + b = append(b, 0) } for j := protocol.ByteCount(0); j < l; j++ { - w.WriteByte(uint8(i >> (8 * (l - 1 - j)))) + b = append(b, uint8(i>>(8*(l-1-j)))) } + return b } // Len determines the number of bytes that will be needed to write the number i. diff --git a/vendor/github.com/lucas-clemente/quic-go/receive_stream.go b/vendor/github.com/quic-go/quic-go/receive_stream.go similarity index 86% rename from vendor/github.com/lucas-clemente/quic-go/receive_stream.go rename to vendor/github.com/quic-go/quic-go/receive_stream.go index ae6a449b..0a7e9416 100644 --- a/vendor/github.com/lucas-clemente/quic-go/receive_stream.go +++ b/vendor/github.com/quic-go/quic-go/receive_stream.go @@ -6,11 +6,11 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type receiveStreamI interface { @@ -34,24 +34,19 @@ type receiveStream struct { currentFrame []byte currentFrameDone func() - 
currentFrameIsLast bool // is the currentFrame the last frame on this stream readPosInFrame int + currentFrameIsLast bool // is the currentFrame the last frame on this stream + finRead bool // set once we read a frame with a Fin closeForShutdownErr error cancelReadErr error resetRemotelyErr *StreamError - closedForShutdown bool // set when CloseForShutdown() is called - finRead bool // set once we read a frame with a Fin - canceledRead bool // set when CancelRead() is called - resetRemotely bool // set when HandleResetStreamFrame() is called - readChan chan struct{} readOnce chan struct{} // cap: 1, to protect against concurrent use of Read deadline time.Time flowController flowcontrol.StreamFlowController - version protocol.VersionNumber } var ( @@ -63,7 +58,6 @@ func newReceiveStream( streamID protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, ) *receiveStream { return &receiveStream{ streamID: streamID, @@ -73,7 +67,6 @@ func newReceiveStream( readChan: make(chan struct{}, 1), readOnce: make(chan struct{}, 1), finalOffset: protocol.MaxByteCount, - version: version, } } @@ -103,13 +96,13 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err if s.finRead { return false, 0, io.EOF } - if s.canceledRead { + if s.cancelReadErr != nil { return false, 0, s.cancelReadErr } - if s.resetRemotely { + if s.resetRemotelyErr != nil { return false, 0, s.resetRemotelyErr } - if s.closedForShutdown { + if s.closeForShutdownErr != nil { return false, 0, s.closeForShutdownErr } @@ -125,13 +118,13 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err for { // Stop waiting on errors - if s.closedForShutdown { + if s.closeForShutdownErr != nil { return false, bytesRead, s.closeForShutdownErr } - if s.canceledRead { + if s.cancelReadErr != nil { return false, bytesRead, s.cancelReadErr } - if s.resetRemotely { + if s.resetRemotelyErr != nil { return 
false, bytesRead, s.resetRemotelyErr } @@ -178,8 +171,9 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err s.readPosInFrame += m bytesRead += m - // when a RESET_STREAM was received, the was already informed about the final byteOffset for this stream - if !s.resetRemotely { + // when a RESET_STREAM was received, the flow controller was already + // informed about the final byteOffset for this stream + if s.resetRemotelyErr == nil { s.flowController.AddBytesRead(protocol.ByteCount(m)) } @@ -214,11 +208,10 @@ func (s *receiveStream) CancelRead(errorCode StreamErrorCode) { } func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) bool /* completed */ { - if s.finRead || s.canceledRead || s.resetRemotely { + if s.finRead || s.cancelReadErr != nil || s.resetRemotelyErr != nil { return false } - s.canceledRead = true - s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode) + s.cancelReadErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: false} s.signalRead() s.sender.queueControlFrame(&wire.StopSendingFrame{ StreamID: s.streamID, @@ -250,7 +243,7 @@ func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) (bool /* newlyRcvdFinalOffset = s.finalOffset == protocol.MaxByteCount s.finalOffset = maxOffset } - if s.canceledRead { + if s.cancelReadErr != nil { return newlyRcvdFinalOffset, nil } if err := s.frameQueue.Push(frame.Data, frame.Offset, frame.PutBack); err != nil { @@ -273,7 +266,7 @@ func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) err } func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) (bool /*completed */, error) { - if s.closedForShutdown { + if s.closeForShutdownErr != nil { return false, nil } if err := s.flowController.UpdateHighestReceived(frame.FinalSize, true); err != nil { @@ -283,13 +276,13 @@ func (s *receiveStream) handleResetStreamFrameImpl(frame 
*wire.ResetStreamFrame) s.finalOffset = frame.FinalSize // ignore duplicate RESET_STREAM frames for this stream (after checking their final offset) - if s.resetRemotely { + if s.resetRemotelyErr != nil { return false, nil } - s.resetRemotely = true s.resetRemotelyErr = &StreamError{ StreamID: s.streamID, ErrorCode: frame.ErrorCode, + Remote: true, } s.signalRead() return newlyRcvdFinalOffset, nil @@ -312,7 +305,6 @@ func (s *receiveStream) SetReadDeadline(t time.Time) error { // The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET. func (s *receiveStream) closeForShutdown(err error) { s.mutex.Lock() - s.closedForShutdown = true s.closeForShutdownErr = err s.mutex.Unlock() s.signalRead() diff --git a/vendor/github.com/lucas-clemente/quic-go/retransmission_queue.go b/vendor/github.com/quic-go/quic-go/retransmission_queue.go similarity index 83% rename from vendor/github.com/lucas-clemente/quic-go/retransmission_queue.go rename to vendor/github.com/quic-go/quic-go/retransmission_queue.go index 0cfbbc4d..2ce0b893 100644 --- a/vendor/github.com/lucas-clemente/quic-go/retransmission_queue.go +++ b/vendor/github.com/quic-go/quic-go/retransmission_queue.go @@ -3,8 +3,8 @@ package quic import ( "fmt" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) type retransmissionQueue struct { @@ -15,12 +15,10 @@ type retransmissionQueue struct { handshakeCryptoData []*wire.CryptoFrame appData []wire.Frame - - version protocol.VersionNumber } -func newRetransmissionQueue(ver protocol.VersionNumber) *retransmissionQueue { - return &retransmissionQueue{version: ver} +func newRetransmissionQueue() *retransmissionQueue { + return &retransmissionQueue{} } func (q *retransmissionQueue) AddInitial(f wire.Frame) { @@ -58,10 +56,10 @@ func (q *retransmissionQueue) AddAppData(f wire.Frame) { q.appData = 
append(q.appData, f) } -func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Frame { +func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame { if len(q.initialCryptoData) > 0 { f := q.initialCryptoData[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version) + newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) if newFrame == nil && !needsSplit { // the whole frame fits q.initialCryptoData = q.initialCryptoData[1:] return f @@ -74,17 +72,17 @@ func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Fr return nil } f := q.initial[0] - if f.Length(q.version) > maxLen { + if f.Length(v) > maxLen { return nil } q.initial = q.initial[1:] return f } -func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire.Frame { +func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame { if len(q.handshakeCryptoData) > 0 { f := q.handshakeCryptoData[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version) + newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v) if newFrame == nil && !needsSplit { // the whole frame fits q.handshakeCryptoData = q.handshakeCryptoData[1:] return f @@ -97,19 +95,19 @@ func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire. 
return nil } f := q.handshake[0] - if f.Length(q.version) > maxLen { + if f.Length(v) > maxLen { return nil } q.handshake = q.handshake[1:] return f } -func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount) wire.Frame { +func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame { if len(q.appData) == 0 { return nil } f := q.appData[0] - if f.Length(q.version) > maxLen { + if f.Length(v) > maxLen { return nil } q.appData = q.appData[1:] diff --git a/vendor/github.com/lucas-clemente/quic-go/send_conn.go b/vendor/github.com/quic-go/quic-go/send_conn.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/send_conn.go rename to vendor/github.com/quic-go/quic-go/send_conn.go diff --git a/vendor/github.com/lucas-clemente/quic-go/send_queue.go b/vendor/github.com/quic-go/quic-go/send_queue.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/send_queue.go rename to vendor/github.com/quic-go/quic-go/send_queue.go diff --git a/vendor/github.com/lucas-clemente/quic-go/send_stream.go b/vendor/github.com/quic-go/quic-go/send_stream.go similarity index 82% rename from vendor/github.com/lucas-clemente/quic-go/send_stream.go rename to vendor/github.com/quic-go/quic-go/send_stream.go index 66807927..cebe30ef 100644 --- a/vendor/github.com/lucas-clemente/quic-go/send_stream.go +++ b/vendor/github.com/quic-go/quic-go/send_stream.go @@ -6,19 +6,19 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/ackhandler" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/protocol" + 
"github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" ) type sendStreamI interface { SendStream handleStopSendingFrame(*wire.StopSendingFrame) hasData() bool - popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool) + popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool) closeForShutdown(error) updateSendWindow(protocol.ByteCount) } @@ -40,11 +40,9 @@ type sendStream struct { cancelWriteErr error closeForShutdownErr error - closedForShutdown bool // set when CloseForShutdown() is called - finishedWriting bool // set once Close() is called - canceledWrite bool // set when CancelWrite() is called, or a STOP_SENDING frame is received - finSent bool // set when a STREAM_FRAME with FIN bit has been sent - completed bool // set when this stream has been reported to the streamSender as completed + finishedWriting bool // set once Close() is called + finSent bool // set when a STREAM_FRAME with FIN bit has been sent + completed bool // set when this stream has been reported to the streamSender as completed dataForWriting []byte // during a Write() call, this slice is the part of p that still needs to be sent out nextFrame *wire.StreamFrame @@ -54,8 +52,6 @@ type sendStream struct { deadline time.Time flowController flowcontrol.StreamFlowController - - version protocol.VersionNumber } var ( @@ -67,7 +63,6 @@ func newSendStream( streamID protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, ) *sendStream { s := &sendStream{ streamID: streamID, @@ -75,7 +70,6 @@ func newSendStream( flowController: flowController, writeChan: make(chan struct{}, 1), writeOnce: make(chan struct{}, 1), // cap: 1, to protect against concurrent use of Write - version: version, } s.ctx, s.ctxCancel = context.WithCancel(context.Background()) return s @@ -98,7 +92,7 @@ func (s *sendStream) Write(p 
[]byte) (int, error) { if s.finishedWriting { return 0, fmt.Errorf("write on closed stream %d", s.streamID) } - if s.canceledWrite { + if s.cancelWriteErr != nil { return 0, s.cancelWriteErr } if s.closeForShutdownErr != nil { @@ -157,7 +151,7 @@ func (s *sendStream) Write(p []byte) (int, error) { } deadlineTimer.Reset(deadline) } - if s.dataForWriting == nil || s.canceledWrite || s.closedForShutdown { + if s.dataForWriting == nil || s.cancelWriteErr != nil || s.closeForShutdownErr != nil { break } } @@ -204,9 +198,9 @@ func (s *sendStream) canBufferStreamFrame() bool { // popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream // maxBytes is the maximum length this frame (including frame header) will have. -func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool /* has more data to send */) { +func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool /* has more data to send */) { s.mutex.Lock() - f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes) + f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v) if f != nil { s.numOutstandingFrames++ } @@ -215,16 +209,20 @@ func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Fr if f == nil { return nil, hasMoreData } - return &ackhandler.Frame{Frame: f, OnLost: s.queueRetransmission, OnAcked: s.frameAcked}, hasMoreData + af := ackhandler.GetFrame() + af.Frame = f + af.OnLost = s.queueRetransmission + af.OnAcked = s.frameAcked + return af, hasMoreData } -func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more data to send */) { - if s.canceledWrite || s.closeForShutdownErr != nil { +func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more data to send */) { + if s.cancelWriteErr != nil || 
s.closeForShutdownErr != nil { return nil, false } if len(s.retransmissionQueue) > 0 { - f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes) + f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v) if f != nil || hasMoreRetransmissions { if f == nil { return nil, true @@ -260,7 +258,7 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun return nil, true } - f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow) + f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v) if dataLen := f.DataLen(); dataLen > 0 { s.writeOffset += f.DataLen() s.flowController.AddBytesSent(f.DataLen()) @@ -272,12 +270,12 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun return f, hasMoreData } -func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) (*wire.StreamFrame, bool) { +func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool) { if s.nextFrame != nil { nextFrame := s.nextFrame s.nextFrame = nil - maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, s.version)) + maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, v)) if nextFrame.DataLen() > maxDataLen { s.nextFrame = wire.GetStreamFrame() s.nextFrame.StreamID = s.streamID @@ -299,7 +297,7 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) f.DataLenPresent = true f.Data = f.Data[:0] - hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow) + hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow, v) if len(f.Data) == 0 && !f.Fin { f.PutBack() return nil, hasMoreData @@ -307,8 +305,8 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) return f, hasMoreData } -func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount) bool { - maxDataLen := 
f.MaxDataLen(maxBytes, s.version) +func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) bool { + maxDataLen := f.MaxDataLen(maxBytes, v) if maxDataLen == 0 { // a STREAM frame must have at least one byte of data return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting } @@ -317,9 +315,9 @@ func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxByte return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting } -func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more retransmissions */) { +func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more retransmissions */) { f := s.retransmissionQueue[0] - newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, s.version) + newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, v) if needsSplit { return newFrame, true } @@ -354,7 +352,7 @@ func (s *sendStream) frameAcked(f wire.Frame) { f.(*wire.StreamFrame).PutBack() s.mutex.Lock() - if s.canceledWrite { + if s.cancelWriteErr != nil { s.mutex.Unlock() return } @@ -371,7 +369,7 @@ func (s *sendStream) frameAcked(f wire.Frame) { } func (s *sendStream) isNewlyCompleted() bool { - completed := (s.finSent || s.canceledWrite) && s.numOutstandingFrames == 0 && len(s.retransmissionQueue) == 0 + completed := (s.finSent || s.cancelWriteErr != nil) && s.numOutstandingFrames == 0 && len(s.retransmissionQueue) == 0 if completed && !s.completed { s.completed = true return true @@ -383,7 +381,7 @@ func (s *sendStream) queueRetransmission(f wire.Frame) { sf := f.(*wire.StreamFrame) sf.DataLenPresent = true s.mutex.Lock() - if s.canceledWrite { + if s.cancelWriteErr != nil { s.mutex.Unlock() return } @@ -399,11 +397,11 @@ func (s *sendStream) queueRetransmission(f wire.Frame) { func (s *sendStream) Close() error { 
s.mutex.Lock() - if s.closedForShutdown { + if s.closeForShutdownErr != nil { s.mutex.Unlock() return nil } - if s.canceledWrite { + if s.cancelWriteErr != nil { s.mutex.Unlock() return fmt.Errorf("close called for canceled stream %d", s.streamID) } @@ -416,19 +414,18 @@ func (s *sendStream) Close() error { } func (s *sendStream) CancelWrite(errorCode StreamErrorCode) { - s.cancelWriteImpl(errorCode, fmt.Errorf("Write on stream %d canceled with error code %d", s.streamID, errorCode)) + s.cancelWriteImpl(errorCode, false) } // must be called after locking the mutex -func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, writeErr error) { +func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) { s.mutex.Lock() - if s.canceledWrite { + if s.cancelWriteErr != nil { s.mutex.Unlock() return } s.ctxCancel() - s.canceledWrite = true - s.cancelWriteErr = writeErr + s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote} s.numOutstandingFrames = 0 s.retransmissionQueue = nil newlyCompleted := s.isNewlyCompleted() @@ -457,10 +454,7 @@ func (s *sendStream) updateSendWindow(limit protocol.ByteCount) { } func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) { - s.cancelWriteImpl(frame.ErrorCode, &StreamError{ - StreamID: s.streamID, - ErrorCode: frame.ErrorCode, - }) + s.cancelWriteImpl(frame.ErrorCode, true) } func (s *sendStream) Context() context.Context { @@ -481,7 +475,6 @@ func (s *sendStream) SetWriteDeadline(t time.Time) error { func (s *sendStream) closeForShutdown(err error) { s.mutex.Lock() s.ctxCancel() - s.closedForShutdown = true s.closeForShutdownErr = err s.mutex.Unlock() s.signalWrite() diff --git a/vendor/github.com/lucas-clemente/quic-go/server.go b/vendor/github.com/quic-go/quic-go/server.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/server.go rename to vendor/github.com/quic-go/quic-go/server.go index 16d4d818..734d617f 100644 --- 
a/vendor/github.com/lucas-clemente/quic-go/server.go +++ b/vendor/github.com/quic-go/quic-go/server.go @@ -1,7 +1,6 @@ package quic import ( - "bytes" "context" "crypto/rand" "crypto/tls" @@ -12,12 +11,12 @@ import ( "sync/atomic" "time" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" - "github.com/lucas-clemente/quic-go/logging" + "github.com/quic-go/quic-go/internal/handshake" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/wire" + "github.com/quic-go/quic-go/logging" ) // ErrServerClosed is returned by the Listener or EarlyListener's Accept method after a call to Close. @@ -88,7 +87,6 @@ type baseServer struct { *Config, *tls.Config, *handshake.TokenGenerator, - bool, /* enable 0-RTT */ bool, /* client address validated by an address validation token */ logging.ConnectionTracer, uint64, @@ -343,13 +341,13 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s return false } if !s.config.DisableVersionNegotiationPackets { - go s.sendVersionNegotiationPacket(p.remoteAddr, src, dest, p.info.OOB()) + go s.sendVersionNegotiationPacket(p.remoteAddr, src, dest, p.info.OOB(), v) } return false } // If we're creating a new connection, the packet will be passed to the connection. // The header will then be parsed again. 
- hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDGenerator.ConnectionIDLen()) + hdr, _, _, err := wire.ParsePacket(p.data) if err != nil { if s.config.Tracer != nil { s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError) @@ -365,7 +363,7 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s return false } - if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial { + if hdr.Type != protocol.PacketTypeInitial { // Drop long header packets. // There's little point in sending a Stateless Reset, since the client // might not have received the token yet. @@ -506,7 +504,6 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro s.config, s.tlsConf, s.tokenGenerator, - s.acceptEarlyConns, clientAddrIsValid, tracer, tracingID, @@ -568,7 +565,6 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack return err } replyHdr := &wire.ExtendedHeader{} - replyHdr.IsLongHeader = true replyHdr.Type = protocol.PacketTypeRetry replyHdr.Version = hdr.Version replyHdr.SrcConnectionID = srcConnID @@ -580,19 +576,19 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack replyHdr.Log(s.logger) } - packetBuffer := getPacketBuffer() - defer packetBuffer.Release() - buf := bytes.NewBuffer(packetBuffer.Data) - if err := replyHdr.Write(buf, hdr.Version); err != nil { + buf := getPacketBuffer() + defer buf.Release() + buf.Data, err = replyHdr.Append(buf.Data, hdr.Version) + if err != nil { return err } // append the Retry integrity tag - tag := handshake.GetRetryIntegrityTag(buf.Bytes(), hdr.DestConnectionID, hdr.Version) - buf.Write(tag[:]) + tag := handshake.GetRetryIntegrityTag(buf.Data, hdr.DestConnectionID, hdr.Version) + buf.Data = append(buf.Data, tag[:]...) 
if s.config.Tracer != nil { - s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(buf.Len()), nil) + s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(buf.Data)), nil) } - _, err = s.conn.WritePacket(buf.Bytes(), remoteAddr, info.OOB()) + _, err = s.conn.WritePacket(buf.Data, remoteAddr, info.OOB()) return err } @@ -630,52 +626,51 @@ func (s *baseServer) sendConnectionRefused(remoteAddr net.Addr, hdr *wire.Header // sendError sends the error as a response to the packet received with header hdr func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer handshake.LongHeaderSealer, errorCode qerr.TransportErrorCode, info *packetInfo) error { - packetBuffer := getPacketBuffer() - defer packetBuffer.Release() - buf := bytes.NewBuffer(packetBuffer.Data) + b := getPacketBuffer() + defer b.Release() ccf := &wire.ConnectionCloseFrame{ErrorCode: uint64(errorCode)} replyHdr := &wire.ExtendedHeader{} - replyHdr.IsLongHeader = true replyHdr.Type = protocol.PacketTypeInitial replyHdr.Version = hdr.Version replyHdr.SrcConnectionID = hdr.DestConnectionID replyHdr.DestConnectionID = hdr.SrcConnectionID replyHdr.PacketNumberLen = protocol.PacketNumberLen4 replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead()) - if err := replyHdr.Write(buf, hdr.Version); err != nil { + var err error + b.Data, err = replyHdr.Append(b.Data, hdr.Version) + if err != nil { return err } - payloadOffset := buf.Len() + payloadOffset := len(b.Data) - raw := buf.Bytes() - raw, err := ccf.Append(raw, hdr.Version) + b.Data, err = ccf.Append(b.Data, hdr.Version) if err != nil { return err } - _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset]) - raw = raw[0 : len(raw)+sealer.Overhead()] + _ = sealer.Seal(b.Data[payloadOffset:payloadOffset], b.Data[payloadOffset:], replyHdr.PacketNumber, b.Data[:payloadOffset]) + b.Data = 
b.Data[0 : len(b.Data)+sealer.Overhead()] pnOffset := payloadOffset - int(replyHdr.PacketNumberLen) sealer.EncryptHeader( - raw[pnOffset+4:pnOffset+4+16], - &raw[0], - raw[pnOffset:payloadOffset], + b.Data[pnOffset+4:pnOffset+4+16], + &b.Data[0], + b.Data[pnOffset:payloadOffset], ) replyHdr.Log(s.logger) wire.LogFrame(s.logger, ccf, true) if s.config.Tracer != nil { - s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(raw)), []logging.Frame{ccf}) + s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(b.Data)), []logging.Frame{ccf}) } - _, err = s.conn.WritePacket(raw, remoteAddr, info.OOB()) + _, err = s.conn.WritePacket(b.Data, remoteAddr, info.OOB()) return err } -func (s *baseServer) sendVersionNegotiationPacket(remote net.Addr, src, dest protocol.ArbitraryLenConnectionID, oob []byte) { - s.logger.Debugf("Client offered version %s, sending Version Negotiation") +func (s *baseServer) sendVersionNegotiationPacket(remote net.Addr, src, dest protocol.ArbitraryLenConnectionID, oob []byte, v protocol.VersionNumber) { + s.logger.Debugf("Client offered version %s, sending Version Negotiation", v) data := wire.ComposeVersionNegotiation(dest, src, s.config.Versions) if s.config.Tracer != nil { diff --git a/vendor/github.com/lucas-clemente/quic-go/stream.go b/vendor/github.com/quic-go/quic-go/stream.go similarity index 89% rename from vendor/github.com/lucas-clemente/quic-go/stream.go rename to vendor/github.com/quic-go/quic-go/stream.go index 95bbcb35..98d2fc6e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/stream.go +++ b/vendor/github.com/quic-go/quic-go/stream.go @@ -6,10 +6,10 @@ import ( "sync" "time" - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/ackhandler" + 
"github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) type deadlineError struct{} @@ -60,7 +60,7 @@ type streamI interface { // for sending hasData() bool handleStopSendingFrame(*wire.StopSendingFrame) - popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool) + popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool) updateSendWindow(protocol.ByteCount) } @@ -80,8 +80,6 @@ type stream struct { sender streamSender receiveStreamCompleted bool sendStreamCompleted bool - - version protocol.VersionNumber } var _ Stream = &stream{} @@ -90,9 +88,8 @@ var _ Stream = &stream{} func newStream(streamID protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, ) *stream { - s := &stream{sender: sender, version: version} + s := &stream{sender: sender} senderForSendStream := &uniStreamSender{ streamSender: sender, onStreamCompletedImpl: func() { @@ -102,7 +99,7 @@ func newStream(streamID protocol.StreamID, s.completedMutex.Unlock() }, } - s.sendStream = *newSendStream(streamID, senderForSendStream, flowController, version) + s.sendStream = *newSendStream(streamID, senderForSendStream, flowController) senderForReceiveStream := &uniStreamSender{ streamSender: sender, onStreamCompletedImpl: func() { @@ -112,7 +109,7 @@ func newStream(streamID protocol.StreamID, s.completedMutex.Unlock() }, } - s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController, version) + s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController) return s } diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map.go b/vendor/github.com/quic-go/quic-go/streams_map.go similarity index 94% rename from vendor/github.com/lucas-clemente/quic-go/streams_map.go rename to vendor/github.com/quic-go/quic-go/streams_map.go index e9f0c2e1..b1a80eb3 
100644 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map.go +++ b/vendor/github.com/quic-go/quic-go/streams_map.go @@ -7,10 +7,10 @@ import ( "net" "sync" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/qerr" + "github.com/quic-go/quic-go/internal/wire" ) type streamError struct { @@ -46,7 +46,6 @@ var errTooManyOpenStreams = errors.New("too many open streams") type streamsMap struct { perspective protocol.Perspective - version protocol.VersionNumber maxIncomingBidiStreams uint64 maxIncomingUniStreams uint64 @@ -70,7 +69,6 @@ func newStreamsMap( maxIncomingBidiStreams uint64, maxIncomingUniStreams uint64, perspective protocol.Perspective, - version protocol.VersionNumber, ) streamManager { m := &streamsMap{ perspective: perspective, @@ -78,7 +76,6 @@ func newStreamsMap( maxIncomingBidiStreams: maxIncomingBidiStreams, maxIncomingUniStreams: maxIncomingUniStreams, sender: sender, - version: version, } m.initMaps() return m @@ -89,7 +86,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeBidi, func(num protocol.StreamNum) streamI { id := num.StreamID(protocol.StreamTypeBidi, m.perspective) - return newStream(id, m.sender, m.newFlowController(id), m.version) + return newStream(id, m.sender, m.newFlowController(id)) }, m.sender.queueControlFrame, ) @@ -97,7 +94,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeBidi, func(num protocol.StreamNum) streamI { id := num.StreamID(protocol.StreamTypeBidi, m.perspective.Opposite()) - return newStream(id, m.sender, m.newFlowController(id), m.version) + return newStream(id, m.sender, m.newFlowController(id)) }, m.maxIncomingBidiStreams, m.sender.queueControlFrame, @@ -106,7 +103,7 @@ func (m 
*streamsMap) initMaps() { protocol.StreamTypeUni, func(num protocol.StreamNum) sendStreamI { id := num.StreamID(protocol.StreamTypeUni, m.perspective) - return newSendStream(id, m.sender, m.newFlowController(id), m.version) + return newSendStream(id, m.sender, m.newFlowController(id)) }, m.sender.queueControlFrame, ) @@ -114,7 +111,7 @@ func (m *streamsMap) initMaps() { protocol.StreamTypeUni, func(num protocol.StreamNum) receiveStreamI { id := num.StreamID(protocol.StreamTypeUni, m.perspective.Opposite()) - return newReceiveStream(id, m.sender, m.newFlowController(id), m.version) + return newReceiveStream(id, m.sender, m.newFlowController(id)) }, m.maxIncomingUniStreams, m.sender.queueControlFrame, diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming.go b/vendor/github.com/quic-go/quic-go/streams_map_incoming.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/streams_map_incoming.go rename to vendor/github.com/quic-go/quic-go/streams_map_incoming.go index 6fe0c61b..18ec6f99 100644 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming.go +++ b/vendor/github.com/quic-go/quic-go/streams_map_incoming.go @@ -4,8 +4,8 @@ import ( "context" "sync" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) type incomingStream interface { diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing.go b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go similarity index 98% rename from vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing.go rename to vendor/github.com/quic-go/quic-go/streams_map_outgoing.go index d4f249f0..fd45f4e7 100644 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing.go +++ b/vendor/github.com/quic-go/quic-go/streams_map_outgoing.go @@ -4,8 +4,8 @@ import ( "context" "sync" - 
"github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) type outgoingStream interface { diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn.go b/vendor/github.com/quic-go/quic-go/sys_conn.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn.go rename to vendor/github.com/quic-go/quic-go/sys_conn.go index 7cc05465..d6c1d616 100644 --- a/vendor/github.com/lucas-clemente/quic-go/sys_conn.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn.go @@ -5,8 +5,8 @@ import ( "syscall" "time" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) // OOBCapablePacketConn is a connection that allows the reading of ECN bits from the IP header. diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go b/vendor/github.com/quic-go/quic-go/sys_conn_df.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_df.go rename to vendor/github.com/quic-go/quic-go/sys_conn_df.go diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go similarity index 95% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go rename to vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go index c4923164..98542b41 100644 --- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_linux.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_linux.go @@ -6,8 +6,9 @@ import ( "errors" "syscall" - "github.com/lucas-clemente/quic-go/internal/utils" "golang.org/x/sys/unix" + + "github.com/quic-go/quic-go/internal/utils" ) func setDF(rawConn syscall.RawConn) error { diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go 
b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go similarity index 96% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go rename to vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go index a83025c0..9855e8de 100644 --- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_df_windows.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_df_windows.go @@ -6,8 +6,9 @@ import ( "errors" "syscall" - "github.com/lucas-clemente/quic-go/internal/utils" "golang.org/x/sys/windows" + + "github.com/quic-go/quic-go/internal/utils" ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_darwin.go rename to vendor/github.com/quic-go/quic-go/sys_conn_helper_darwin.go diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_freebsd.go rename to vendor/github.com/quic-go/quic-go/sys_conn_helper_freebsd.go diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go b/vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_helper_linux.go rename to vendor/github.com/quic-go/quic-go/sys_conn_helper_linux.go diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_no_oob.go rename to vendor/github.com/quic-go/quic-go/sys_conn_no_oob.go diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go similarity index 98% rename from 
vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go rename to vendor/github.com/quic-go/quic-go/sys_conn_oob.go index 71b24fc9..806dfb81 100644 --- a/vendor/github.com/lucas-clemente/quic-go/sys_conn_oob.go +++ b/vendor/github.com/quic-go/quic-go/sys_conn_oob.go @@ -14,8 +14,8 @@ import ( "golang.org/x/net/ipv6" "golang.org/x/sys/unix" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/utils" ) const ( diff --git a/vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go b/vendor/github.com/quic-go/quic-go/sys_conn_windows.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/sys_conn_windows.go rename to vendor/github.com/quic-go/quic-go/sys_conn_windows.go diff --git a/vendor/github.com/lucas-clemente/quic-go/token_store.go b/vendor/github.com/quic-go/quic-go/token_store.go similarity index 84% rename from vendor/github.com/lucas-clemente/quic-go/token_store.go rename to vendor/github.com/quic-go/quic-go/token_store.go index 9641dc5a..00460e50 100644 --- a/vendor/github.com/lucas-clemente/quic-go/token_store.go +++ b/vendor/github.com/quic-go/quic-go/token_store.go @@ -1,10 +1,10 @@ package quic import ( - "container/list" "sync" - "github.com/lucas-clemente/quic-go/internal/utils" + "github.com/quic-go/quic-go/internal/utils" + list "github.com/quic-go/quic-go/internal/utils/linkedlist" ) type singleOriginTokenStore struct { @@ -48,8 +48,8 @@ type lruTokenStoreEntry struct { type lruTokenStore struct { mutex sync.Mutex - m map[string]*list.Element - q *list.List + m map[string]*list.Element[*lruTokenStoreEntry] + q *list.List[*lruTokenStoreEntry] capacity int singleOriginSize int } @@ -61,8 +61,8 @@ var _ TokenStore = &lruTokenStore{} // tokensPerOrigin specifies the maximum number of tokens per origin. 
func NewLRUTokenStore(maxOrigins, tokensPerOrigin int) TokenStore { return &lruTokenStore{ - m: make(map[string]*list.Element), - q: list.New(), + m: make(map[string]*list.Element[*lruTokenStoreEntry]), + q: list.New[*lruTokenStoreEntry](), capacity: maxOrigins, singleOriginSize: tokensPerOrigin, } @@ -73,7 +73,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) { defer s.mutex.Unlock() if el, ok := s.m[key]; ok { - entry := el.Value.(*lruTokenStoreEntry) + entry := el.Value entry.cache.Add(token) s.q.MoveToFront(el) return @@ -90,7 +90,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) { } elem := s.q.Back() - entry := elem.Value.(*lruTokenStoreEntry) + entry := elem.Value delete(s.m, entry.key) entry.key = key entry.cache = newSingleOriginTokenStore(s.singleOriginSize) @@ -106,7 +106,7 @@ func (s *lruTokenStore) Pop(key string) *ClientToken { var token *ClientToken if el, ok := s.m[key]; ok { s.q.MoveToFront(el) - cache := el.Value.(*lruTokenStoreEntry).cache + cache := el.Value.cache token = cache.Pop() if cache.Len() == 0 { s.q.Remove(el) diff --git a/vendor/github.com/lucas-clemente/quic-go/tools.go b/vendor/github.com/quic-go/quic-go/tools.go similarity index 100% rename from vendor/github.com/lucas-clemente/quic-go/tools.go rename to vendor/github.com/quic-go/quic-go/tools.go diff --git a/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go b/vendor/github.com/quic-go/quic-go/window_update_queue.go similarity index 91% rename from vendor/github.com/lucas-clemente/quic-go/window_update_queue.go rename to vendor/github.com/quic-go/quic-go/window_update_queue.go index 2abcf673..9ed12143 100644 --- a/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go +++ b/vendor/github.com/quic-go/quic-go/window_update_queue.go @@ -3,9 +3,9 @@ package quic import ( "sync" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - 
"github.com/lucas-clemente/quic-go/internal/wire" + "github.com/quic-go/quic-go/internal/flowcontrol" + "github.com/quic-go/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/wire" ) type windowUpdateQueue struct { diff --git a/vendor/github.com/lucas-clemente/quic-go/zero_rtt_queue.go b/vendor/github.com/quic-go/quic-go/zero_rtt_queue.go similarity index 92% rename from vendor/github.com/lucas-clemente/quic-go/zero_rtt_queue.go rename to vendor/github.com/quic-go/quic-go/zero_rtt_queue.go index 7ad7ee10..b81a936e 100644 --- a/vendor/github.com/lucas-clemente/quic-go/zero_rtt_queue.go +++ b/vendor/github.com/quic-go/quic-go/zero_rtt_queue.go @@ -3,7 +3,7 @@ package quic import ( "time" - "github.com/lucas-clemente/quic-go/internal/protocol" + "github.com/quic-go/quic-go/internal/protocol" ) type zeroRTTQueue struct { diff --git a/vendor/github.com/quic-go/webtransport-go/.gitignore b/vendor/github.com/quic-go/webtransport-go/.gitignore new file mode 100644 index 00000000..8cbead44 --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/.gitignore @@ -0,0 +1 @@ +qlog/ diff --git a/vendor/github.com/quic-go/webtransport-go/LICENSE b/vendor/github.com/quic-go/webtransport-go/LICENSE new file mode 100644 index 00000000..0e78adcc --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/LICENSE @@ -0,0 +1,7 @@ +Copyright 2022 Marten Seemann + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/quic-go/webtransport-go/README.md b/vendor/github.com/quic-go/webtransport-go/README.md new file mode 100644 index 00000000..c655847b --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/README.md @@ -0,0 +1,39 @@ +# webtransport-go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/webtransport-go)](https://pkg.go.dev/github.com/quic-go/webtransport-go) +[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/webtransport-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/webtransport-go/) + +webtransport-go is an implementation of the WebTransport protocol, based on [quic-go](https://github.com/quic-go/quic-go). It currently implements [draft-02](https://www.ietf.org/archive/id/draft-ietf-webtrans-http3-02.html) of the specification. + +## Running a Server + +```go +// create a new webtransport.Server, listening on (UDP) port 443 +s := webtransport.Server{ + H3: http3.Server{Addr: ":443"}, +} + +// Create a new HTTP endpoint /webtransport. +http.HandleFunc("/webtransport", func(w http.ResponseWriter, r *http.Request) { + conn, err := s.Upgrade(w, r) + if err != nil { + log.Printf("upgrading failed: %s", err) + w.WriteHeader(500) + return + } + // Handle the connection. Here goes the application logic. +}) + +s.ListenAndServeTLS(certFile, keyFile) +``` + +Now that the server is running, Chrome can be used to establish a new WebTransport session as described in [this tutorial](https://web.dev/webtransport/). 
+ +## Running a Client + +```go +var d webtransport.Dialer +rsp, conn, err := d.Dial(ctx, "https://example.com/webtransport", nil) +// err is only nil if rsp.StatusCode is a 2xx +// Handle the connection. Here goes the application logic. +``` diff --git a/vendor/github.com/quic-go/webtransport-go/client.go b/vendor/github.com/quic-go/webtransport-go/client.go new file mode 100644 index 00000000..ffcb7497 --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/client.go @@ -0,0 +1,124 @@ +package webtransport + +import ( + "context" + "fmt" + "net/http" + "net/url" + "sync" + "time" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/http3" + "github.com/quic-go/quic-go/quicvarint" +) + +type Dialer struct { + // If not set, reasonable defaults will be used. + // In order for WebTransport to function, this implementation will: + // * overwrite the StreamHijacker and UniStreamHijacker + // * enable datagram support + // * set the MaxIncomingStreams to 100 on the quic.Config, if unset + *http3.RoundTripper + + // StreamReorderingTime is the time an incoming WebTransport stream that cannot be associated + // with a session is buffered. + // This can happen if the response to a CONNECT request (that creates a new session) is reordered, + // and arrives after the first WebTransport stream(s) for that session. + // Defaults to 5 seconds. 
+ StreamReorderingTimeout time.Duration + + ctx context.Context + ctxCancel context.CancelFunc + + initOnce sync.Once + + conns sessionManager +} + +func (d *Dialer) init() { + timeout := d.StreamReorderingTimeout + if timeout == 0 { + timeout = 5 * time.Second + } + d.conns = *newSessionManager(timeout) + d.ctx, d.ctxCancel = context.WithCancel(context.Background()) + if d.RoundTripper == nil { + d.RoundTripper = &http3.RoundTripper{} + } + d.RoundTripper.EnableDatagrams = true + if d.RoundTripper.AdditionalSettings == nil { + d.RoundTripper.AdditionalSettings = make(map[uint64]uint64) + } + d.RoundTripper.StreamHijacker = func(ft http3.FrameType, conn quic.Connection, str quic.Stream, e error) (hijacked bool, err error) { + if isWebTransportError(e) { + return true, nil + } + if ft != webTransportFrameType { + return false, nil + } + id, err := quicvarint.Read(quicvarint.NewReader(str)) + if err != nil { + if isWebTransportError(err) { + return true, nil + } + return false, err + } + d.conns.AddStream(conn, str, sessionID(id)) + return true, nil + } + d.RoundTripper.UniStreamHijacker = func(st http3.StreamType, conn quic.Connection, str quic.ReceiveStream, err error) (hijacked bool) { + if st != webTransportUniStreamType && !isWebTransportError(err) { + return false + } + d.conns.AddUniStream(conn, str) + return true + } + if d.QuicConfig == nil { + d.QuicConfig = &quic.Config{} + } + if d.QuicConfig.MaxIncomingStreams == 0 { + d.QuicConfig.MaxIncomingStreams = 100 + } +} + +func (d *Dialer) Dial(ctx context.Context, urlStr string, reqHdr http.Header) (*http.Response, *Session, error) { + d.initOnce.Do(func() { d.init() }) + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + if reqHdr == nil { + reqHdr = http.Header{} + } + reqHdr.Add(webTransportDraftOfferHeaderKey, "1") + req := &http.Request{ + Method: http.MethodConnect, + Header: reqHdr, + Proto: "webtransport", + Host: u.Host, + URL: u, + } + req = req.WithContext(ctx) + + rsp, err 
:= d.RoundTripper.RoundTripOpt(req, http3.RoundTripOpt{DontCloseRequestStream: true}) + if err != nil { + return nil, nil, err + } + if rsp.StatusCode < 200 || rsp.StatusCode >= 300 { + return rsp, nil, fmt.Errorf("received status %d", rsp.StatusCode) + } + str := rsp.Body.(http3.HTTPStreamer).HTTPStream() + conn := d.conns.AddSession( + rsp.Body.(http3.Hijacker).StreamCreator(), + sessionID(str.StreamID()), + str, + ) + return rsp, conn, nil +} + +func (d *Dialer) Close() error { + d.ctxCancel() + return nil +} diff --git a/vendor/github.com/quic-go/webtransport-go/codecov.yml b/vendor/github.com/quic-go/webtransport-go/codecov.yml new file mode 100644 index 00000000..ad40e757 --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/codecov.yml @@ -0,0 +1,9 @@ +comment: false +coverage: + status: + patch: + default: + informational: true + project: + default: + informational: true diff --git a/vendor/github.com/quic-go/webtransport-go/errors.go b/vendor/github.com/quic-go/webtransport-go/errors.go new file mode 100644 index 00000000..9929513e --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/errors.go @@ -0,0 +1,78 @@ +package webtransport + +import ( + "errors" + "fmt" + + "github.com/quic-go/quic-go" +) + +// StreamErrorCode is an error code used for stream termination. +type StreamErrorCode uint8 + +// SessionErrorCode is an error code for session termination. 
+type SessionErrorCode uint32 + +const ( + firstErrorCode = 0x52e4a40fa8db + lastErrorCode = 0x52e4a40fa9e2 +) + +func webtransportCodeToHTTPCode(n StreamErrorCode) quic.StreamErrorCode { + return quic.StreamErrorCode(firstErrorCode) + quic.StreamErrorCode(n) + quic.StreamErrorCode(n/0x1e) +} + +func httpCodeToWebtransportCode(h quic.StreamErrorCode) (StreamErrorCode, error) { + if h < firstErrorCode || h > lastErrorCode { + return 0, errors.New("error code outside of expected range") + } + if (h-0x21)%0x1f == 0 { + return 0, errors.New("invalid error code") + } + shifted := h - firstErrorCode + return StreamErrorCode(shifted - shifted/0x1f), nil +} + +func isWebTransportError(e error) bool { + if e == nil { + return false + } + var strErr *quic.StreamError + if !errors.As(e, &strErr) { + return false + } + if strErr.ErrorCode == sessionCloseErrorCode { + return true + } + _, err := httpCodeToWebtransportCode(strErr.ErrorCode) + return err == nil +} + +// WebTransportBufferedStreamRejectedErrorCode is the error code of the +// H3_WEBTRANSPORT_BUFFERED_STREAM_REJECTED error. +const WebTransportBufferedStreamRejectedErrorCode quic.StreamErrorCode = 0x3994bd84 + +// StreamError is the error that is returned from stream operations (Read, Write) when the stream is canceled. +type StreamError struct { + ErrorCode StreamErrorCode +} + +func (e *StreamError) Is(target error) bool { + _, ok := target.(*StreamError) + return ok +} + +func (e *StreamError) Error() string { + return fmt.Sprintf("stream canceled with error code %d", e.ErrorCode) +} + +// ConnectionError is a WebTransport connection error. 
+type ConnectionError struct { + Remote bool + ErrorCode SessionErrorCode + Message string +} + +var _ error = &ConnectionError{} + +func (e *ConnectionError) Error() string { return e.Message } diff --git a/vendor/github.com/quic-go/webtransport-go/protocol.go b/vendor/github.com/quic-go/webtransport-go/protocol.go new file mode 100644 index 00000000..1770f26e --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/protocol.go @@ -0,0 +1,5 @@ +package webtransport + +const settingsEnableWebtransport = 0x2b603742 + +const protocolHeader = "webtransport" diff --git a/vendor/github.com/quic-go/webtransport-go/server.go b/vendor/github.com/quic-go/webtransport-go/server.go new file mode 100644 index 00000000..7b61346a --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/server.go @@ -0,0 +1,227 @@ +package webtransport + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "net/url" + "sync" + "time" + "unicode/utf8" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/http3" + "github.com/quic-go/quic-go/quicvarint" +) + +const ( + webTransportDraftOfferHeaderKey = "Sec-Webtransport-Http3-Draft02" + webTransportDraftHeaderKey = "Sec-Webtransport-Http3-Draft" + webTransportDraftHeaderValue = "draft02" +) + +const ( + webTransportFrameType = 0x41 + webTransportUniStreamType = 0x54 +) + +type Server struct { + H3 http3.Server + + // StreamReorderingTime is the time an incoming WebTransport stream that cannot be associated + // with a session is buffered. + // This can happen if the CONNECT request (that creates a new session) is reordered, and arrives + // after the first WebTransport stream(s) for that session. + // Defaults to 5 seconds. + StreamReorderingTimeout time.Duration + + // CheckOrigin is used to validate the request origin, thereby preventing cross-site request forgery. + // CheckOrigin returns true if the request Origin header is acceptable. 
+ // If unset, a safe default is used: If the Origin header is set, it is checked that it + // matches the request's Host header. + CheckOrigin func(r *http.Request) bool + + ctx context.Context // is closed when Close is called + ctxCancel context.CancelFunc + refCount sync.WaitGroup + + initOnce sync.Once + initErr error + + conns *sessionManager +} + +func (s *Server) initialize() error { + s.initOnce.Do(func() { + s.initErr = s.init() + }) + return s.initErr +} + +func (s *Server) init() error { + s.ctx, s.ctxCancel = context.WithCancel(context.Background()) + timeout := s.StreamReorderingTimeout + if timeout == 0 { + timeout = 5 * time.Second + } + s.conns = newSessionManager(timeout) + if s.CheckOrigin == nil { + s.CheckOrigin = checkSameOrigin + } + + // configure the http3.Server + if s.H3.AdditionalSettings == nil { + s.H3.AdditionalSettings = make(map[uint64]uint64) + } + s.H3.AdditionalSettings[settingsEnableWebtransport] = 1 + s.H3.EnableDatagrams = true + if s.H3.StreamHijacker != nil { + return errors.New("StreamHijacker already set") + } + s.H3.StreamHijacker = func(ft http3.FrameType, qconn quic.Connection, str quic.Stream, err error) (bool /* hijacked */, error) { + if isWebTransportError(err) { + return true, nil + } + if ft != webTransportFrameType { + return false, nil + } + // Reading the varint might block if the peer sends really small frames, but this is fine. + // This function is called from the HTTP/3 request handler, which runs in its own Go routine. 
+ id, err := quicvarint.Read(quicvarint.NewReader(str)) + if err != nil { + if isWebTransportError(err) { + return true, nil + } + return false, err + } + s.conns.AddStream(qconn, str, sessionID(id)) + return true, nil + } + s.H3.UniStreamHijacker = func(st http3.StreamType, qconn quic.Connection, str quic.ReceiveStream, err error) (hijacked bool) { + if st != webTransportUniStreamType && !isWebTransportError(err) { + return false + } + s.conns.AddUniStream(qconn, str) + return true + } + return nil +} + +func (s *Server) Serve(conn net.PacketConn) error { + if err := s.initialize(); err != nil { + return err + } + return s.H3.Serve(conn) +} + +// ServeQUICConn serves a single QUIC connection. +func (s *Server) ServeQUICConn(conn quic.Connection) error { + if err := s.initialize(); err != nil { + return err + } + return s.H3.ServeQUICConn(conn) +} + +func (s *Server) ListenAndServe() error { + if err := s.initialize(); err != nil { + return err + } + return s.H3.ListenAndServe() +} + +func (s *Server) ListenAndServeTLS(certFile, keyFile string) error { + if err := s.initialize(); err != nil { + return err + } + return s.H3.ListenAndServeTLS(certFile, keyFile) +} + +func (s *Server) Close() error { + // Make sure that ctxCancel is defined. + // This is expected to be uncommon. + // It only happens if the server is closed without Serve / ListenAndServe having been called. 
+ s.initOnce.Do(func() {}) + + if s.ctxCancel != nil { + s.ctxCancel() + } + if s.conns != nil { + s.conns.Close() + } + err := s.H3.Close() + s.refCount.Wait() + return err +} + +func (s *Server) Upgrade(w http.ResponseWriter, r *http.Request) (*Session, error) { + if r.Method != http.MethodConnect { + return nil, fmt.Errorf("expected CONNECT request, got %s", r.Method) + } + if r.Proto != protocolHeader { + return nil, fmt.Errorf("unexpected protocol: %s", r.Proto) + } + if v, ok := r.Header[webTransportDraftOfferHeaderKey]; !ok || len(v) != 1 || v[0] != "1" { + return nil, fmt.Errorf("missing or invalid %s header", webTransportDraftOfferHeaderKey) + } + if !s.CheckOrigin(r) { + return nil, errors.New("webtransport: request origin not allowed") + } + w.Header().Add(webTransportDraftHeaderKey, webTransportDraftHeaderValue) + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + + httpStreamer, ok := r.Body.(http3.HTTPStreamer) + if !ok { // should never happen, unless quic-go changed the API + return nil, errors.New("failed to take over HTTP stream") + } + str := httpStreamer.HTTPStream() + sID := sessionID(str.StreamID()) + + hijacker, ok := w.(http3.Hijacker) + if !ok { // should never happen, unless quic-go changed the API + return nil, errors.New("failed to hijack") + } + return s.conns.AddSession( + hijacker.StreamCreator(), + sID, + r.Body.(http3.HTTPStreamer).HTTPStream(), + ), nil +} + +// copied from https://github.com/gorilla/websocket +func checkSameOrigin(r *http.Request) bool { + origin := r.Header.Get("Origin") + if origin == "" { + return true + } + u, err := url.Parse(origin) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +// copied from https://github.com/gorilla/websocket +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { 
+ sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} diff --git a/vendor/github.com/quic-go/webtransport-go/session.go b/vendor/github.com/quic-go/webtransport-go/session.go new file mode 100644 index 00000000..f440059c --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/session.go @@ -0,0 +1,418 @@ +package webtransport + +import ( + "context" + "encoding/binary" + "errors" + "io" + "math/rand" + "net" + "sync" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/http3" + "github.com/quic-go/quic-go/quicvarint" +) + +// sessionID is the WebTransport Session ID +type sessionID uint64 + +const closeWebtransportSessionCapsuleType http3.CapsuleType = 0x2843 + +type acceptQueue[T any] struct { + mx sync.Mutex + // The channel is used to notify consumers (via Chan) about new incoming items. + // Needs to be buffered to preserve the notification if an item is enqueued + // between a call to Next and to Chan. + c chan struct{} + // Contains all the streams waiting to be accepted. + // There's no explicit limit to the length of the queue, but it is implicitly + // limited by the stream flow control provided by QUIC. 
+ queue []T +} + +func newAcceptQueue[T any]() *acceptQueue[T] { + return &acceptQueue[T]{c: make(chan struct{}, 1)} +} + +func (q *acceptQueue[T]) Add(str T) { + q.mx.Lock() + q.queue = append(q.queue, str) + q.mx.Unlock() + + select { + case q.c <- struct{}{}: + default: + } +} + +func (q *acceptQueue[T]) Next() T { + q.mx.Lock() + defer q.mx.Unlock() + + if len(q.queue) == 0 { + return *new(T) + } + str := q.queue[0] + q.queue = q.queue[1:] + return str +} + +func (q *acceptQueue[T]) Chan() <-chan struct{} { return q.c } + +type Session struct { + sessionID sessionID + qconn http3.StreamCreator + requestStr quic.Stream + + streamHdr []byte + uniStreamHdr []byte + + ctx context.Context + closeMx sync.Mutex + closeErr error // not nil once the session is closed + // streamCtxs holds all the context.CancelFuncs of calls to Open{Uni}StreamSync calls currently active. + // When the session is closed, this allows us to cancel all these contexts and make those calls return. + streamCtxs map[int]context.CancelFunc + + bidiAcceptQueue acceptQueue[Stream] + uniAcceptQueue acceptQueue[ReceiveStream] + + // TODO: garbage collect streams from when they are closed + streams streamsMap +} + +func newSession(sessionID sessionID, qconn http3.StreamCreator, requestStr quic.Stream) *Session { + tracingID := qconn.Context().Value(quic.ConnectionTracingKey).(uint64) + ctx, ctxCancel := context.WithCancel(context.WithValue(context.Background(), quic.ConnectionTracingKey, tracingID)) + c := &Session{ + sessionID: sessionID, + qconn: qconn, + requestStr: requestStr, + ctx: ctx, + streamCtxs: make(map[int]context.CancelFunc), + bidiAcceptQueue: *newAcceptQueue[Stream](), + uniAcceptQueue: *newAcceptQueue[ReceiveStream](), + streams: *newStreamsMap(), + } + // precompute the headers for unidirectional streams + c.uniStreamHdr = make([]byte, 0, 2+quicvarint.Len(uint64(c.sessionID))) + c.uniStreamHdr = quicvarint.Append(c.uniStreamHdr, webTransportUniStreamType) + c.uniStreamHdr = 
quicvarint.Append(c.uniStreamHdr, uint64(c.sessionID)) + // precompute the headers for bidirectional streams + c.streamHdr = make([]byte, 0, 2+quicvarint.Len(uint64(c.sessionID))) + c.streamHdr = quicvarint.Append(c.streamHdr, webTransportFrameType) + c.streamHdr = quicvarint.Append(c.streamHdr, uint64(c.sessionID)) + + go func() { + defer ctxCancel() + c.handleConn() + }() + return c +} + +func (s *Session) handleConn() { + var closeErr *ConnectionError + err := s.parseNextCapsule() + if !errors.As(err, &closeErr) { + closeErr = &ConnectionError{Remote: true} + } + + s.closeMx.Lock() + defer s.closeMx.Unlock() + // If we closed the connection, the closeErr will be set in Close. + if s.closeErr == nil { + s.closeErr = closeErr + } + for _, cancel := range s.streamCtxs { + cancel() + } + s.streams.CloseSession() +} + +// parseNextCapsule parses the next Capsule sent on the request stream. +// It returns a ConnectionError, if the capsule received is a CLOSE_WEBTRANSPORT_SESSION Capsule. +func (s *Session) parseNextCapsule() error { + for { + // TODO: enforce max size + typ, r, err := http3.ParseCapsule(quicvarint.NewReader(s.requestStr)) + if err != nil { + return err + } + switch typ { + case closeWebtransportSessionCapsuleType: + b := make([]byte, 4) + if _, err := io.ReadFull(r, b); err != nil { + return err + } + appErrCode := binary.BigEndian.Uint32(b) + appErrMsg, err := io.ReadAll(r) + if err != nil { + return err + } + return &ConnectionError{ + Remote: true, + ErrorCode: SessionErrorCode(appErrCode), + Message: string(appErrMsg), + } + default: + // unknown capsule, skip it + if _, err := io.ReadAll(r); err != nil { + return err + } + } + } +} + +func (s *Session) addStream(qstr quic.Stream, addStreamHeader bool) Stream { + var hdr []byte + if addStreamHeader { + hdr = s.streamHdr + } + str := newStream(qstr, hdr, func() { s.streams.RemoveStream(qstr.StreamID()) }) + s.streams.AddStream(qstr.StreamID(), str.closeWithSession) + return str +} + +func (s 
*Session) addReceiveStream(qstr quic.ReceiveStream) ReceiveStream { + str := newReceiveStream(qstr, func() { s.streams.RemoveStream(qstr.StreamID()) }) + s.streams.AddStream(qstr.StreamID(), func() { + str.closeWithSession() + }) + return str +} + +func (s *Session) addSendStream(qstr quic.SendStream) SendStream { + str := newSendStream(qstr, s.uniStreamHdr, func() { s.streams.RemoveStream(qstr.StreamID()) }) + s.streams.AddStream(qstr.StreamID(), str.closeWithSession) + return str +} + +// addIncomingStream adds a bidirectional stream that the remote peer opened +func (s *Session) addIncomingStream(qstr quic.Stream) { + s.closeMx.Lock() + closeErr := s.closeErr + if closeErr != nil { + s.closeMx.Unlock() + qstr.CancelRead(sessionCloseErrorCode) + qstr.CancelWrite(sessionCloseErrorCode) + return + } + str := s.addStream(qstr, false) + s.closeMx.Unlock() + + s.bidiAcceptQueue.Add(str) +} + +// addIncomingUniStream adds a unidirectional stream that the remote peer opened +func (s *Session) addIncomingUniStream(qstr quic.ReceiveStream) { + s.closeMx.Lock() + closeErr := s.closeErr + if closeErr != nil { + s.closeMx.Unlock() + qstr.CancelRead(sessionCloseErrorCode) + return + } + str := s.addReceiveStream(qstr) + s.closeMx.Unlock() + + s.uniAcceptQueue.Add(str) +} + +// Context returns a context that is closed when the session is closed. +func (s *Session) Context() context.Context { + return s.ctx +} + +func (s *Session) AcceptStream(ctx context.Context) (Stream, error) { + s.closeMx.Lock() + closeErr := s.closeErr + s.closeMx.Unlock() + if closeErr != nil { + return nil, closeErr + } + + for { + // If there's a stream in the accept queue, return it immediately. + if str := s.bidiAcceptQueue.Next(); str != nil { + return str, nil + } + // No stream in the accept queue. Wait until we accept one. 
+ select { + case <-s.ctx.Done(): + return nil, s.closeErr + case <-ctx.Done(): + return nil, ctx.Err() + case <-s.bidiAcceptQueue.Chan(): + } + } +} + +func (s *Session) AcceptUniStream(ctx context.Context) (ReceiveStream, error) { + s.closeMx.Lock() + closeErr := s.closeErr + s.closeMx.Unlock() + if closeErr != nil { + return nil, s.closeErr + } + + for { + // If there's a stream in the accept queue, return it immediately. + if str := s.uniAcceptQueue.Next(); str != nil { + return str, nil + } + // No stream in the accept queue. Wait until we accept one. + select { + case <-s.ctx.Done(): + return nil, s.closeErr + case <-ctx.Done(): + return nil, ctx.Err() + case <-s.uniAcceptQueue.Chan(): + } + } +} + +func (s *Session) OpenStream() (Stream, error) { + s.closeMx.Lock() + defer s.closeMx.Unlock() + + if s.closeErr != nil { + return nil, s.closeErr + } + + qstr, err := s.qconn.OpenStream() + if err != nil { + return nil, err + } + return s.addStream(qstr, true), nil +} + +func (s *Session) addStreamCtxCancel(cancel context.CancelFunc) (id int) { +rand: + id = rand.Int() + if _, ok := s.streamCtxs[id]; ok { + goto rand + } + s.streamCtxs[id] = cancel + return id +} + +func (s *Session) OpenStreamSync(ctx context.Context) (Stream, error) { + s.closeMx.Lock() + if s.closeErr != nil { + s.closeMx.Unlock() + return nil, s.closeErr + } + ctx, cancel := context.WithCancel(ctx) + id := s.addStreamCtxCancel(cancel) + s.closeMx.Unlock() + + qstr, err := s.qconn.OpenStreamSync(ctx) + if err != nil { + if s.closeErr != nil { + return nil, s.closeErr + } + return nil, err + } + + s.closeMx.Lock() + defer s.closeMx.Unlock() + delete(s.streamCtxs, id) + // Some time might have passed. 
Check if the session is still alive + if s.closeErr != nil { + qstr.CancelWrite(sessionCloseErrorCode) + qstr.CancelRead(sessionCloseErrorCode) + return nil, s.closeErr + } + return s.addStream(qstr, true), nil +} + +func (s *Session) OpenUniStream() (SendStream, error) { + s.closeMx.Lock() + defer s.closeMx.Unlock() + + if s.closeErr != nil { + return nil, s.closeErr + } + qstr, err := s.qconn.OpenUniStream() + if err != nil { + return nil, err + } + return s.addSendStream(qstr), nil +} + +func (s *Session) OpenUniStreamSync(ctx context.Context) (str SendStream, err error) { + s.closeMx.Lock() + if s.closeErr != nil { + s.closeMx.Unlock() + return nil, s.closeErr + } + ctx, cancel := context.WithCancel(ctx) + id := s.addStreamCtxCancel(cancel) + s.closeMx.Unlock() + + qstr, err := s.qconn.OpenUniStreamSync(ctx) + if err != nil { + if s.closeErr != nil { + return nil, s.closeErr + } + return nil, err + } + + s.closeMx.Lock() + defer s.closeMx.Unlock() + delete(s.streamCtxs, id) + // Some time might have passed. Check if the session is still alive + if s.closeErr != nil { + qstr.CancelWrite(sessionCloseErrorCode) + return nil, s.closeErr + } + return s.addSendStream(qstr), nil +} + +func (s *Session) LocalAddr() net.Addr { + return s.qconn.LocalAddr() +} + +func (s *Session) RemoteAddr() net.Addr { + return s.qconn.RemoteAddr() +} + +func (s *Session) CloseWithError(code SessionErrorCode, msg string) error { + first, err := s.closeWithError(code, msg) + if err != nil || !first { + return err + } + + s.requestStr.CancelRead(1337) + err = s.requestStr.Close() + <-s.ctx.Done() + return err +} + +func (s *Session) closeWithError(code SessionErrorCode, msg string) (bool /* first call to close session */, error) { + s.closeMx.Lock() + defer s.closeMx.Unlock() + // Duplicate call, or the remote already closed this session. 
+ if s.closeErr != nil { + return false, nil + } + s.closeErr = &ConnectionError{ + ErrorCode: code, + Message: msg, + } + + b := make([]byte, 4, 4+len(msg)) + binary.BigEndian.PutUint32(b, uint32(code)) + b = append(b, []byte(msg)...) + + return true, http3.WriteCapsule( + quicvarint.NewWriter(s.requestStr), + closeWebtransportSessionCapsuleType, + b, + ) +} + +func (c *Session) ConnectionState() quic.ConnectionState { + return c.qconn.ConnectionState() +} diff --git a/vendor/github.com/quic-go/webtransport-go/session_manager.go b/vendor/github.com/quic-go/webtransport-go/session_manager.go new file mode 100644 index 00000000..2dbb7381 --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/session_manager.go @@ -0,0 +1,195 @@ +package webtransport + +import ( + "context" + "sync" + "time" + + "github.com/quic-go/quic-go" + "github.com/quic-go/quic-go/http3" + "github.com/quic-go/quic-go/quicvarint" +) + +// session is the map value in the conns map +type session struct { + created chan struct{} // is closed once the session map has been initialized + counter int // how many streams are waiting for this session to be established + conn *Session +} + +type sessionManager struct { + refCount sync.WaitGroup + ctx context.Context + ctxCancel context.CancelFunc + + timeout time.Duration + + mx sync.Mutex + conns map[http3.StreamCreator]map[sessionID]*session +} + +func newSessionManager(timeout time.Duration) *sessionManager { + m := &sessionManager{ + timeout: timeout, + conns: make(map[http3.StreamCreator]map[sessionID]*session), + } + m.ctx, m.ctxCancel = context.WithCancel(context.Background()) + return m +} + +// AddStream adds a new bidirectional stream to a WebTransport session. +// If the WebTransport session has not yet been established, +// it starts a new go routine and waits for establishment of the session. +// If that takes longer than timeout, the stream is reset. 
+func (m *sessionManager) AddStream(qconn http3.StreamCreator, str quic.Stream, id sessionID) { + sess, isExisting := m.getOrCreateSession(qconn, id) + if isExisting { + sess.conn.addIncomingStream(str) + return + } + + m.refCount.Add(1) + go func() { + defer m.refCount.Done() + m.handleStream(str, sess) + + m.mx.Lock() + defer m.mx.Unlock() + + sess.counter-- + // Once no more streams are waiting for this session to be established, + // and this session is still outstanding, delete it from the map. + if sess.counter == 0 && sess.conn == nil { + m.maybeDelete(qconn, id) + } + }() +} + +func (m *sessionManager) maybeDelete(qconn http3.StreamCreator, id sessionID) { + sessions, ok := m.conns[qconn] + if !ok { // should never happen + return + } + delete(sessions, id) + if len(sessions) == 0 { + delete(m.conns, qconn) + } +} + +// AddUniStream adds a new unidirectional stream to a WebTransport session. +// If the WebTransport session has not yet been established, +// it starts a new go routine and waits for establishment of the session. +// If that takes longer than timeout, the stream is reset. +func (m *sessionManager) AddUniStream(qconn http3.StreamCreator, str quic.ReceiveStream) { + idv, err := quicvarint.Read(quicvarint.NewReader(str)) + if err != nil { + str.CancelRead(1337) + } + id := sessionID(idv) + + sess, isExisting := m.getOrCreateSession(qconn, id) + if isExisting { + sess.conn.addIncomingUniStream(str) + return + } + + m.refCount.Add(1) + go func() { + defer m.refCount.Done() + m.handleUniStream(str, sess) + + m.mx.Lock() + defer m.mx.Unlock() + + sess.counter-- + // Once no more streams are waiting for this session to be established, + // and this session is still outstanding, delete it from the map. 
+ if sess.counter == 0 && sess.conn == nil { + m.maybeDelete(qconn, id) + } + }() +} + +func (m *sessionManager) getOrCreateSession(qconn http3.StreamCreator, id sessionID) (sess *session, existed bool) { + m.mx.Lock() + defer m.mx.Unlock() + + sessions, ok := m.conns[qconn] + if !ok { + sessions = make(map[sessionID]*session) + m.conns[qconn] = sessions + } + + sess, ok = sessions[id] + if ok && sess.conn != nil { + return sess, true + } + if !ok { + sess = &session{created: make(chan struct{})} + sessions[id] = sess + } + sess.counter++ + return sess, false +} + +func (m *sessionManager) handleStream(str quic.Stream, sess *session) { + t := time.NewTimer(m.timeout) + defer t.Stop() + + // When multiple streams are waiting for the same session to be established, + // the timeout is calculated for every stream separately. + select { + case <-sess.created: + sess.conn.addIncomingStream(str) + case <-t.C: + str.CancelRead(WebTransportBufferedStreamRejectedErrorCode) + str.CancelWrite(WebTransportBufferedStreamRejectedErrorCode) + case <-m.ctx.Done(): + } +} + +func (m *sessionManager) handleUniStream(str quic.ReceiveStream, sess *session) { + t := time.NewTimer(m.timeout) + defer t.Stop() + + // When multiple streams are waiting for the same session to be established, + // the timeout is calculated for every stream separately. + select { + case <-sess.created: + sess.conn.addIncomingUniStream(str) + case <-t.C: + str.CancelRead(WebTransportBufferedStreamRejectedErrorCode) + case <-m.ctx.Done(): + } +} + +// AddSession adds a new WebTransport session. +func (m *sessionManager) AddSession(qconn http3.StreamCreator, id sessionID, requestStr quic.Stream) *Session { + conn := newSession(id, qconn, requestStr) + + m.mx.Lock() + defer m.mx.Unlock() + + sessions, ok := m.conns[qconn] + if !ok { + sessions = make(map[sessionID]*session) + m.conns[qconn] = sessions + } + if sess, ok := sessions[id]; ok { + // We might already have an entry of this session. 
+ // This can happen when we receive a stream for this WebTransport session before we complete the HTTP request + // that establishes the session. + sess.conn = conn + close(sess.created) + return conn + } + c := make(chan struct{}) + close(c) + sessions[id] = &session{created: c, conn: conn} + return conn +} + +func (m *sessionManager) Close() { + m.ctxCancel() + m.refCount.Wait() +} diff --git a/vendor/github.com/quic-go/webtransport-go/stream.go b/vendor/github.com/quic-go/webtransport-go/stream.go new file mode 100644 index 00000000..012ff088 --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/stream.go @@ -0,0 +1,212 @@ +package webtransport + +import ( + "errors" + "fmt" + "io" + "net" + "sync" + "time" + + "github.com/quic-go/quic-go" +) + +const sessionCloseErrorCode quic.StreamErrorCode = 0x170d7b68 + +type SendStream interface { + io.Writer + io.Closer + + StreamID() quic.StreamID + CancelWrite(StreamErrorCode) + + SetWriteDeadline(time.Time) error +} + +type ReceiveStream interface { + io.Reader + + StreamID() quic.StreamID + CancelRead(StreamErrorCode) + + SetReadDeadline(time.Time) error +} + +type Stream interface { + SendStream + ReceiveStream + SetDeadline(time.Time) error +} + +type sendStream struct { + str quic.SendStream + // WebTransport stream header. + // Set by the constructor, set to nil once sent out. + // Might be initialized to nil if this sendStream is part of an incoming bidirectional stream. 
+ streamHdr []byte + + onClose func() +} + +var _ SendStream = &sendStream{} + +func newSendStream(str quic.SendStream, hdr []byte, onClose func()) *sendStream { + return &sendStream{str: str, streamHdr: hdr, onClose: onClose} +} + +func (s *sendStream) maybeSendStreamHeader() error { + if len(s.streamHdr) == 0 { + return nil + } + if _, err := s.str.Write(s.streamHdr); err != nil { + return err + } + s.streamHdr = nil + return nil +} + +func (s *sendStream) Write(b []byte) (int, error) { + if err := s.maybeSendStreamHeader(); err != nil { + return 0, err + } + n, err := s.str.Write(b) + if err != nil && !isTimeoutError(err) { + s.onClose() + } + return n, maybeConvertStreamError(err) +} + +func (s *sendStream) CancelWrite(e StreamErrorCode) { + s.str.CancelWrite(webtransportCodeToHTTPCode(e)) + s.onClose() +} + +func (s *sendStream) closeWithSession() { + s.str.CancelWrite(sessionCloseErrorCode) +} + +func (s *sendStream) Close() error { + if err := s.maybeSendStreamHeader(); err != nil { + return err + } + s.onClose() + return maybeConvertStreamError(s.str.Close()) +} + +func (s *sendStream) SetWriteDeadline(t time.Time) error { + return maybeConvertStreamError(s.str.SetWriteDeadline(t)) +} + +func (s *sendStream) StreamID() quic.StreamID { + return s.str.StreamID() +} + +type receiveStream struct { + str quic.ReceiveStream + onClose func() +} + +var _ ReceiveStream = &receiveStream{} + +func newReceiveStream(str quic.ReceiveStream, onClose func()) *receiveStream { + return &receiveStream{str: str, onClose: onClose} +} + +func (s *receiveStream) Read(b []byte) (int, error) { + n, err := s.str.Read(b) + if err != nil && !isTimeoutError(err) { + s.onClose() + } + return n, maybeConvertStreamError(err) +} + +func (s *receiveStream) CancelRead(e StreamErrorCode) { + s.str.CancelRead(webtransportCodeToHTTPCode(e)) + s.onClose() +} + +func (s *receiveStream) closeWithSession() { + s.str.CancelRead(sessionCloseErrorCode) +} + +func (s *receiveStream) SetReadDeadline(t 
time.Time) error { + return maybeConvertStreamError(s.str.SetReadDeadline(t)) +} + +func (s *receiveStream) StreamID() quic.StreamID { + return s.str.StreamID() +} + +type stream struct { + *sendStream + *receiveStream + + mx sync.Mutex + sendSideClosed, recvSideClosed bool + onClose func() +} + +var _ Stream = &stream{} + +func newStream(str quic.Stream, hdr []byte, onClose func()) *stream { + s := &stream{onClose: onClose} + s.sendStream = newSendStream(str, hdr, func() { s.registerClose(true) }) + s.receiveStream = newReceiveStream(str, func() { s.registerClose(false) }) + return s +} + +func (s *stream) registerClose(isSendSide bool) { + s.mx.Lock() + if isSendSide { + s.sendSideClosed = true + } else { + s.recvSideClosed = true + } + isClosed := s.sendSideClosed && s.recvSideClosed + s.mx.Unlock() + + if isClosed { + s.onClose() + } +} + +func (s *stream) closeWithSession() { + s.sendStream.closeWithSession() + s.receiveStream.closeWithSession() +} + +func (s *stream) SetDeadline(t time.Time) error { + err1 := s.sendStream.SetWriteDeadline(t) + err2 := s.receiveStream.SetReadDeadline(t) + if err1 != nil { + return err1 + } + return err2 +} + +func (s *stream) StreamID() quic.StreamID { + return s.receiveStream.StreamID() +} + +func maybeConvertStreamError(err error) error { + if err == nil { + return nil + } + var streamErr *quic.StreamError + if errors.As(err, &streamErr) { + errorCode, cerr := httpCodeToWebtransportCode(streamErr.ErrorCode) + if cerr != nil { + return fmt.Errorf("stream reset, but failed to convert stream error %d: %w", streamErr.ErrorCode, cerr) + } + return &StreamError{ErrorCode: errorCode} + } + return err +} + +func isTimeoutError(err error) bool { + nerr, ok := err.(net.Error) + if !ok { + return false + } + return nerr.Timeout() +} diff --git a/vendor/github.com/quic-go/webtransport-go/streams_map.go b/vendor/github.com/quic-go/webtransport-go/streams_map.go new file mode 100644 index 00000000..c1c78323 --- /dev/null +++ 
b/vendor/github.com/quic-go/webtransport-go/streams_map.go @@ -0,0 +1,42 @@ +package webtransport + +import ( + "sync" + + "github.com/quic-go/quic-go" +) + +type closeFunc func() + +// The streamsMap manages the streams of a single QUIC connection. +// Note that several WebTransport sessions can share one QUIC connection. +type streamsMap struct { + mx sync.Mutex + m map[quic.StreamID]closeFunc +} + +func newStreamsMap() *streamsMap { + return &streamsMap{m: make(map[quic.StreamID]closeFunc)} +} + +func (s *streamsMap) AddStream(id quic.StreamID, close closeFunc) { + s.mx.Lock() + s.m[id] = close + s.mx.Unlock() +} + +func (s *streamsMap) RemoveStream(id quic.StreamID) { + s.mx.Lock() + delete(s.m, id) + s.mx.Unlock() +} + +func (s *streamsMap) CloseSession() { + s.mx.Lock() + defer s.mx.Unlock() + + for _, cl := range s.m { + cl() + } + s.m = nil +} diff --git a/vendor/github.com/quic-go/webtransport-go/version.json b/vendor/github.com/quic-go/webtransport-go/version.json new file mode 100644 index 00000000..cdebeaba --- /dev/null +++ b/vendor/github.com/quic-go/webtransport-go/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.5.2" +} diff --git a/vendor/github.com/spacemonkeygo/spacelog/.travis.yml b/vendor/github.com/spacemonkeygo/spacelog/.travis.yml deleted file mode 100644 index d2b67f69..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.7 - - 1.8 - - tip diff --git a/vendor/github.com/spacemonkeygo/spacelog/LICENSE b/vendor/github.com/spacemonkeygo/spacelog/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. 
- -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file 
distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
- -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/spacemonkeygo/spacelog/README.md b/vendor/github.com/spacemonkeygo/spacelog/README.md deleted file mode 100644 index 28033f68..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# spacelog [![Build Status](https://api.travis-ci.org/spacemonkeygo/spacelog.svg?branch=master)](https://travis-ci.org/spacemonkeygo/spacelog) - -Please see http://godoc.org/github.com/spacemonkeygo/spacelog for info - -### License - -Copyright (C) 2014 Space Monkey, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture.go b/vendor/github.com/spacemonkeygo/spacelog/capture.go deleted file mode 100644 index d7ea1ca3..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/capture.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "fmt" - "os" - "os/exec" -) - -// CaptureOutputToFile opens a filehandle using the given path, then calls -// CaptureOutputToFd on the associated filehandle. -func CaptureOutputToFile(path string) error { - fh, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return err - } - defer fh.Close() - return CaptureOutputToFd(int(fh.Fd())) -} - -// CaptureOutputToProcess starts a process and using CaptureOutputToFd, -// redirects stdout and stderr to the subprocess' stdin. -// CaptureOutputToProcess expects the subcommand to last the lifetime of the -// process, and if the subprocess dies, will panic. 
-func CaptureOutputToProcess(command string, args ...string) error { - cmd := exec.Command(command, args...) - out, err := cmd.StdinPipe() - if err != nil { - return err - } - defer out.Close() - type fder interface { - Fd() uintptr - } - out_fder, ok := out.(fder) - if !ok { - return fmt.Errorf("unable to get underlying pipe") - } - err = CaptureOutputToFd(int(out_fder.Fd())) - if err != nil { - return err - } - err = cmd.Start() - if err != nil { - return err - } - go func() { - err := cmd.Wait() - if err != nil { - panic(fmt.Errorf("captured output process died! %s", err)) - } - }() - return nil -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_ae.go b/vendor/github.com/spacemonkeygo/spacelog/capture_ae.go deleted file mode 100644 index f759b6f1..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/capture_ae.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (C) 2016 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build appengine - -package spacelog - -import ( - "fmt" -) - -func CaptureOutputToFd(fd int) error { - return fmt.Errorf("CaptureOutputToFd not supported on App Engine") -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_linux.go b/vendor/github.com/spacemonkeygo/spacelog/capture_linux.go deleted file mode 100644 index 34a9c089..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/capture_linux.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !appengine - -package spacelog - -import ( - "syscall" -) - -// CaptureOutputToFd redirects the current process' stdout and stderr file -// descriptors to the given file descriptor, using the dup3 syscall. -func CaptureOutputToFd(fd int) error { - err := syscall.Dup3(fd, syscall.Stdout, 0) - if err != nil { - return err - } - err = syscall.Dup3(fd, syscall.Stderr, 0) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_other.go b/vendor/github.com/spacemonkeygo/spacelog/capture_other.go deleted file mode 100644 index 6c65051a..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/capture_other.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !windows -// +build !linux -// +build !appengine -// +build !solaris - -package spacelog - -import ( - "syscall" -) - -// CaptureOutputToFd redirects the current process' stdout and stderr file -// descriptors to the given file descriptor, using the dup2 syscall. -func CaptureOutputToFd(fd int) error { - err := syscall.Dup2(fd, syscall.Stdout) - if err != nil { - return err - } - err = syscall.Dup2(fd, syscall.Stderr) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go b/vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go deleted file mode 100644 index d77e4f2d..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/capture_solaris.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "golang.org/x/sys/unix" -) - -// CaptureOutputToFd redirects the current process' stdout and stderr file -// descriptors to the given file descriptor, using the dup2 syscall. 
-func CaptureOutputToFd(fd int) error { - err := unix.Dup2(fd, unix.Stdout) - if err != nil { - return err - } - err = unix.Dup2(fd, unix.Stderr) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/capture_windows.go b/vendor/github.com/spacemonkeygo/spacelog/capture_windows.go deleted file mode 100644 index e9f061dc..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/capture_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "fmt" -) - -func CaptureOutputToFd(fd int) error { - return fmt.Errorf("CaptureOutputToFd not supported on Windows") -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/collection.go b/vendor/github.com/spacemonkeygo/spacelog/collection.go deleted file mode 100644 index 8231b4a5..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/collection.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "regexp" - "runtime" - "strings" - "sync" - "text/template" -) - -var ( - // If set, these prefixes will be stripped out of automatic logger names. - IgnoredPrefixes []string - - badChars = regexp.MustCompile("[^a-zA-Z0-9_.-]") - slashes = regexp.MustCompile("[/]") -) - -func callerName() string { - pc, _, _, ok := runtime.Caller(2) - if !ok { - return "unknown.unknown" - } - f := runtime.FuncForPC(pc) - if f == nil { - return "unknown.unknown" - } - name := f.Name() - for _, prefix := range IgnoredPrefixes { - name = strings.TrimPrefix(name, prefix) - } - return badChars.ReplaceAllLiteralString( - slashes.ReplaceAllLiteralString(name, "."), "_") -} - -// LoggerCollections contain all of the loggers a program might use. Typically -// a codebase will just use the default logger collection. -type LoggerCollection struct { - mtx sync.Mutex - loggers map[string]*Logger - level LogLevel - handler Handler -} - -// NewLoggerCollection creates a new logger collection. It's unlikely you will -// ever practically need this method. Use the DefaultLoggerCollection instead. -func NewLoggerCollection() *LoggerCollection { - return &LoggerCollection{ - loggers: make(map[string]*Logger), - level: DefaultLevel, - handler: defaultHandler} -} - -// GetLogger returns a new Logger with a name automatically generated using -// the callstack. 
If you want to avoid automatic name generation check out -// GetLoggerNamed -func (c *LoggerCollection) GetLogger() *Logger { - return c.GetLoggerNamed(callerName()) -} - -func (c *LoggerCollection) getLogger(name string, level LogLevel, - handler Handler) *Logger { - c.mtx.Lock() - defer c.mtx.Unlock() - - logger, exists := c.loggers[name] - if !exists { - logger = &Logger{level: level, - collection: c, - name: name, - handler: handler} - c.loggers[name] = logger - } - return logger -} - -// ConfigureLoggers configures loggers according to the given string -// specification, which specifies a set of loggers and their associated -// logging levels. Loggers are semicolon-separated; each -// configuration is specified as =. White space outside of -// logger names and levels is ignored. The default level is specified -// with the name "DEFAULT". -// -// An example specification: -// `DEFAULT=ERROR; foo.bar=WARNING` -func (c *LoggerCollection) ConfigureLoggers(specification string) error { - confs := strings.Split(strings.TrimSpace(specification), ";") - for i := range confs { - conf := strings.SplitN(confs[i], "=", 2) - levelstr := strings.TrimSpace(conf[1]) - name := strings.TrimSpace(conf[0]) - level, err := LevelFromString(levelstr) - if err != nil { - return err - } - if name == "DEFAULT" { - c.SetLevel(nil, level) - continue - } - logger := c.GetLoggerNamed(name) - logger.setLevel(level) - } - return nil -} - -// GetLoggerNamed returns a new Logger with the provided name. GetLogger is -// more frequently used. -func (c *LoggerCollection) GetLoggerNamed(name string) *Logger { - c.mtx.Lock() - defer c.mtx.Unlock() - - logger, exists := c.loggers[name] - if !exists { - logger = &Logger{level: c.level, - collection: c, - name: name, - handler: c.handler} - c.loggers[name] = logger - } - return logger -} - -// SetLevel will set the current log level for all loggers with names that -// match a provided regular expression. 
If the regular expression is nil, then -// all loggers match. -func (c *LoggerCollection) SetLevel(re *regexp.Regexp, level LogLevel) { - c.mtx.Lock() - defer c.mtx.Unlock() - - if re == nil { - c.level = level - } - for name, logger := range c.loggers { - if re == nil || re.MatchString(name) { - logger.setLevel(level) - } - } -} - -// SetHandler will set the current log handler for all loggers with names that -// match a provided regular expression. If the regular expression is nil, then -// all loggers match. -func (c *LoggerCollection) SetHandler(re *regexp.Regexp, handler Handler) { - c.mtx.Lock() - defer c.mtx.Unlock() - - if re == nil { - c.handler = handler - } - for name, logger := range c.loggers { - if re == nil || re.MatchString(name) { - logger.setHandler(handler) - } - } -} - -// SetTextTemplate will set the current text template for all loggers with -// names that match a provided regular expression. If the regular expression -// is nil, then all loggers match. Note that not every handler is guaranteed -// to support text templates and a text template will only apply to -// text-oriented and unstructured handlers. -func (c *LoggerCollection) SetTextTemplate(re *regexp.Regexp, - t *template.Template) { - c.mtx.Lock() - defer c.mtx.Unlock() - - if re == nil { - c.handler.SetTextTemplate(t) - } - for name, logger := range c.loggers { - if re == nil || re.MatchString(name) { - logger.getHandler().SetTextTemplate(t) - } - } -} - -// SetTextOutput will set the current output interface for all loggers with -// names that match a provided regular expression. If the regular expression -// is nil, then all loggers match. Note that not every handler is guaranteed -// to support text output and a text output interface will only apply to -// text-oriented and unstructured handlers. 
-func (c *LoggerCollection) SetTextOutput(re *regexp.Regexp, - output TextOutput) { - c.mtx.Lock() - defer c.mtx.Unlock() - - if re == nil { - c.handler.SetTextOutput(output) - } - for name, logger := range c.loggers { - if re == nil || re.MatchString(name) { - logger.getHandler().SetTextOutput(output) - } - } -} - -var ( - // It's unlikely you'll need to use this directly - DefaultLoggerCollection = NewLoggerCollection() -) - -// GetLogger returns an automatically-named logger on the default logger -// collection. -func GetLogger() *Logger { - return DefaultLoggerCollection.GetLoggerNamed(callerName()) -} - -// GetLoggerNamed returns a new Logger with the provided name on the default -// logger collection. GetLogger is more frequently used. -func GetLoggerNamed(name string) *Logger { - return DefaultLoggerCollection.GetLoggerNamed(name) -} - -// ConfigureLoggers configures loggers according to the given string -// specification, which specifies a set of loggers and their associated -// logging levels. Loggers are colon- or semicolon-separated; each -// configuration is specified as =. White space outside of -// logger names and levels is ignored. The DEFAULT module is specified -// with the name "DEFAULT". -// -// An example specification: -// `DEFAULT=ERROR; foo.bar=WARNING` -func ConfigureLoggers(specification string) error { - return DefaultLoggerCollection.ConfigureLoggers(specification) -} - -// SetLevel will set the current log level for all loggers on the default -// collection with names that match a provided regular expression. If the -// regular expression is nil, then all loggers match. -func SetLevel(re *regexp.Regexp, level LogLevel) { - DefaultLoggerCollection.SetLevel(re, level) -} - -// SetHandler will set the current log handler for all loggers on the default -// collection with names that match a provided regular expression. If the -// regular expression is nil, then all loggers match. 
-func SetHandler(re *regexp.Regexp, handler Handler) { - DefaultLoggerCollection.SetHandler(re, handler) -} - -// SetTextTemplate will set the current text template for all loggers on the -// default collection with names that match a provided regular expression. If -// the regular expression is nil, then all loggers match. Note that not every -// handler is guaranteed to support text templates and a text template will -// only apply to text-oriented and unstructured handlers. -func SetTextTemplate(re *regexp.Regexp, t *template.Template) { - DefaultLoggerCollection.SetTextTemplate(re, t) -} - -// SetTextOutput will set the current output interface for all loggers on the -// default collection with names that match a provided regular expression. If -// the regular expression is nil, then all loggers match. Note that not every -// handler is guaranteed to support text output and a text output interface -// will only apply to text-oriented and unstructured handlers. -func SetTextOutput(re *regexp.Regexp, output TextOutput) { - DefaultLoggerCollection.SetTextOutput(re, output) -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/convenience.go b/vendor/github.com/spacemonkeygo/spacelog/convenience.go deleted file mode 100644 index b3056329..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/convenience.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spacelog - -import ( - "fmt" - "io" -) - -// Trace logs a collection of values if the logger's level is trace or even -// more permissive. -func (l *Logger) Trace(v ...interface{}) { - if l.getLevel() <= Trace { - l.getHandler().Log(l.name, Trace, fmt.Sprint(v...), 1) - } -} - -// Tracef logs a format string with values if the logger's level is trace or -// even more permissive. -func (l *Logger) Tracef(format string, v ...interface{}) { - if l.getLevel() <= Trace { - l.getHandler().Log(l.name, Trace, fmt.Sprintf(format, v...), 1) - } -} - -// Tracee logs an error value if the error is not nil and the logger's level -// is trace or even more permissive. -func (l *Logger) Tracee(err error) { - if l.getLevel() <= Trace && err != nil { - l.getHandler().Log(l.name, Trace, err.Error(), 1) - } -} - -// TraceEnabled returns true if the logger's level is trace or even more -// permissive. -func (l *Logger) TraceEnabled() bool { - return l.getLevel() <= Trace -} - -// Debug logs a collection of values if the logger's level is debug or even -// more permissive. -func (l *Logger) Debug(v ...interface{}) { - if l.getLevel() <= Debug { - l.getHandler().Log(l.name, Debug, fmt.Sprint(v...), 1) - } -} - -// Debugf logs a format string with values if the logger's level is debug or -// even more permissive. -func (l *Logger) Debugf(format string, v ...interface{}) { - if l.getLevel() <= Debug { - l.getHandler().Log(l.name, Debug, fmt.Sprintf(format, v...), 1) - } -} - -// Debuge logs an error value if the error is not nil and the logger's level -// is debug or even more permissive. -func (l *Logger) Debuge(err error) { - if l.getLevel() <= Debug && err != nil { - l.getHandler().Log(l.name, Debug, err.Error(), 1) - } -} - -// DebugEnabled returns true if the logger's level is debug or even more -// permissive. 
-func (l *Logger) DebugEnabled() bool { - return l.getLevel() <= Debug -} - -// Info logs a collection of values if the logger's level is info or even -// more permissive. -func (l *Logger) Info(v ...interface{}) { - if l.getLevel() <= Info { - l.getHandler().Log(l.name, Info, fmt.Sprint(v...), 1) - } -} - -// Infof logs a format string with values if the logger's level is info or -// even more permissive. -func (l *Logger) Infof(format string, v ...interface{}) { - if l.getLevel() <= Info { - l.getHandler().Log(l.name, Info, fmt.Sprintf(format, v...), 1) - } -} - -// Infoe logs an error value if the error is not nil and the logger's level -// is info or even more permissive. -func (l *Logger) Infoe(err error) { - if l.getLevel() <= Info && err != nil { - l.getHandler().Log(l.name, Info, err.Error(), 1) - } -} - -// InfoEnabled returns true if the logger's level is info or even more -// permissive. -func (l *Logger) InfoEnabled() bool { - return l.getLevel() <= Info -} - -// Notice logs a collection of values if the logger's level is notice or even -// more permissive. -func (l *Logger) Notice(v ...interface{}) { - if l.getLevel() <= Notice { - l.getHandler().Log(l.name, Notice, fmt.Sprint(v...), 1) - } -} - -// Noticef logs a format string with values if the logger's level is notice or -// even more permissive. -func (l *Logger) Noticef(format string, v ...interface{}) { - if l.getLevel() <= Notice { - l.getHandler().Log(l.name, Notice, fmt.Sprintf(format, v...), 1) - } -} - -// Noticee logs an error value if the error is not nil and the logger's level -// is notice or even more permissive. -func (l *Logger) Noticee(err error) { - if l.getLevel() <= Notice && err != nil { - l.getHandler().Log(l.name, Notice, err.Error(), 1) - } -} - -// NoticeEnabled returns true if the logger's level is notice or even more -// permissive. 
-func (l *Logger) NoticeEnabled() bool { - return l.getLevel() <= Notice -} - -// Warn logs a collection of values if the logger's level is warning or even -// more permissive. -func (l *Logger) Warn(v ...interface{}) { - if l.getLevel() <= Warning { - l.getHandler().Log(l.name, Warning, fmt.Sprint(v...), 1) - } -} - -// Warnf logs a format string with values if the logger's level is warning or -// even more permissive. -func (l *Logger) Warnf(format string, v ...interface{}) { - if l.getLevel() <= Warning { - l.getHandler().Log(l.name, Warning, fmt.Sprintf(format, v...), 1) - } -} - -// Warne logs an error value if the error is not nil and the logger's level -// is warning or even more permissive. -func (l *Logger) Warne(err error) { - if l.getLevel() <= Warning && err != nil { - l.getHandler().Log(l.name, Warning, err.Error(), 1) - } -} - -// WarnEnabled returns true if the logger's level is warning or even more -// permissive. -func (l *Logger) WarnEnabled() bool { - return l.getLevel() <= Warning -} - -// Error logs a collection of values if the logger's level is error or even -// more permissive. -func (l *Logger) Error(v ...interface{}) { - if l.getLevel() <= Error { - l.getHandler().Log(l.name, Error, fmt.Sprint(v...), 1) - } -} - -// Errorf logs a format string with values if the logger's level is error or -// even more permissive. -func (l *Logger) Errorf(format string, v ...interface{}) { - if l.getLevel() <= Error { - l.getHandler().Log(l.name, Error, fmt.Sprintf(format, v...), 1) - } -} - -// Errore logs an error value if the error is not nil and the logger's level -// is error or even more permissive. -func (l *Logger) Errore(err error) { - if l.getLevel() <= Error && err != nil { - l.getHandler().Log(l.name, Error, err.Error(), 1) - } -} - -// ErrorEnabled returns true if the logger's level is error or even more -// permissive. 
-func (l *Logger) ErrorEnabled() bool { - return l.getLevel() <= Error -} - -// Crit logs a collection of values if the logger's level is critical or even -// more permissive. -func (l *Logger) Crit(v ...interface{}) { - if l.getLevel() <= Critical { - l.getHandler().Log(l.name, Critical, fmt.Sprint(v...), 1) - } -} - -// Critf logs a format string with values if the logger's level is critical or -// even more permissive. -func (l *Logger) Critf(format string, v ...interface{}) { - if l.getLevel() <= Critical { - l.getHandler().Log(l.name, Critical, fmt.Sprintf(format, v...), 1) - } -} - -// Crite logs an error value if the error is not nil and the logger's level -// is critical or even more permissive. -func (l *Logger) Crite(err error) { - if l.getLevel() <= Critical && err != nil { - l.getHandler().Log(l.name, Critical, err.Error(), 1) - } -} - -// CritEnabled returns true if the logger's level is critical or even more -// permissive. -func (l *Logger) CritEnabled() bool { - return l.getLevel() <= Critical -} - -// Log logs a collection of values if the logger's level is the provided level -// or even more permissive. -func (l *Logger) Log(level LogLevel, v ...interface{}) { - if l.getLevel() <= level { - l.getHandler().Log(l.name, level, fmt.Sprint(v...), 1) - } -} - -// Logf logs a format string with values if the logger's level is the provided -// level or even more permissive. -func (l *Logger) Logf(level LogLevel, format string, v ...interface{}) { - if l.getLevel() <= level { - l.getHandler().Log(l.name, level, fmt.Sprintf(format, v...), 1) - } -} - -// Loge logs an error value if the error is not nil and the logger's level -// is the provided level or even more permissive. -func (l *Logger) Loge(level LogLevel, err error) { - if l.getLevel() <= level && err != nil { - l.getHandler().Log(l.name, level, err.Error(), 1) - } -} - -// LevelEnabled returns true if the logger's level is the provided level or -// even more permissive. 
-func (l *Logger) LevelEnabled(level LogLevel) bool { - return l.getLevel() <= level -} - -type writer struct { - l *Logger - level LogLevel -} - -func (w *writer) Write(data []byte) (int, error) { - if w.l.getLevel() <= w.level { - w.l.getHandler().Log(w.l.name, w.level, string(data), 1) - } - return len(data), nil -} - -// Writer returns an io.Writer that writes messages at the given log level. -func (l *Logger) Writer(level LogLevel) io.Writer { - return &writer{l: l, level: level} -} - -type writerNoCaller struct { - l *Logger - level LogLevel -} - -func (w *writerNoCaller) Write(data []byte) (int, error) { - if w.l.getLevel() <= w.level { - w.l.getHandler().Log(w.l.name, w.level, string(data), -1) - } - return len(data), nil -} - -// WriterWithoutCaller returns an io.Writer that writes messages at the given -// log level, but does not attempt to collect the Write caller, and provides -// no caller information to the log event. -func (l *Logger) WriterWithoutCaller(level LogLevel) io.Writer { - return &writerNoCaller{l: l, level: level} -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/doc.go b/vendor/github.com/spacemonkeygo/spacelog/doc.go deleted file mode 100644 index 28c25b4d..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/doc.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* -Package spacelog is a collection of interface lego bricks designed to help you -build a flexible logging system. - -spacelog is loosely inspired by the Python logging library. - -The basic interaction is between a Logger and a Handler. A Logger is -what the programmer typically interacts with for creating log messages. A -Logger will be at a given log level, and if log messages can clear that -specific logger's log level filter, they will be passed off to the Handler. - -Loggers are instantiated from GetLogger and GetLoggerNamed. - -A Handler is a very generic interface for handling log events. You can provide -your own Handler for doing structured JSON output or colorized output or -countless other things. - -Provided are a simple TextHandler with a variety of log event templates and -TextOutput sinks, such as io.Writer, Syslog, and so forth. - -Make sure to see the source of the setup subpackage for an example of easy and -configurable logging setup at process start: - http://godoc.org/github.com/spacemonkeygo/spacelog/setup -*/ -package spacelog diff --git a/vendor/github.com/spacemonkeygo/spacelog/event.go b/vendor/github.com/spacemonkeygo/spacelog/event.go deleted file mode 100644 index da863cbf..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/event.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spacelog - -import ( - "path/filepath" - "strings" - "time" -) - -// TermColors is a type that knows how to output terminal colors and formatting -type TermColors struct{} - -// LogEvent is a type made by the default text handler for feeding to log -// templates. It has as much contextual data about the log event as possible. -type LogEvent struct { - LoggerName string - Level LogLevel - Message string - Filepath string - Line int - Timestamp time.Time - - TermColors -} - -// Reset resets the color palette for terminals that support color -func (TermColors) Reset() string { return "\x1b[0m" } -func (TermColors) Bold() string { return "\x1b[1m" } -func (TermColors) Underline() string { return "\x1b[4m" } -func (TermColors) Black() string { return "\x1b[30m" } -func (TermColors) Red() string { return "\x1b[31m" } -func (TermColors) Green() string { return "\x1b[32m" } -func (TermColors) Yellow() string { return "\x1b[33m" } -func (TermColors) Blue() string { return "\x1b[34m" } -func (TermColors) Magenta() string { return "\x1b[35m" } -func (TermColors) Cyan() string { return "\x1b[36m" } -func (TermColors) White() string { return "\x1b[37m" } - -func (l *LogEvent) Filename() string { - if l.Filepath == "" { - return "" - } - return filepath.Base(l.Filepath) -} - -func (l *LogEvent) Time() string { - return l.Timestamp.Format("15:04:05") -} - -func (l *LogEvent) Date() string { - return l.Timestamp.Format("2006/01/02") -} - -// LevelJustified returns the log level in string form justified so that all -// log levels take the same text width. 
-func (l *LogEvent) LevelJustified() (rv string) { - rv = l.Level.String() - if len(rv) < 5 { - rv += strings.Repeat(" ", 5-len(rv)) - } - return rv -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/handler.go b/vendor/github.com/spacemonkeygo/spacelog/handler.go deleted file mode 100644 index e3db0865..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/handler.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "text/template" -) - -// Handler is an interface that knows how to process log events. This is the -// basic interface type for building a logging system. If you want to route -// structured log data somewhere, you would implement this interface. -type Handler interface { - // Log is called for every message. 
if calldepth is negative, caller - // information is missing - Log(logger_name string, level LogLevel, msg string, calldepth int) - - // These two calls are expected to be no-ops on non-text-output handlers - SetTextTemplate(t *template.Template) - SetTextOutput(output TextOutput) -} - -// HandlerFunc is a type to make implementation of the Handler interface easier -type HandlerFunc func(logger_name string, level LogLevel, msg string, - calldepth int) - -// Log simply calls f(logger_name, level, msg, calldepth) -func (f HandlerFunc) Log(logger_name string, level LogLevel, msg string, - calldepth int) { - f(logger_name, level, msg, calldepth) -} - -// SetTextTemplate is a no-op -func (HandlerFunc) SetTextTemplate(t *template.Template) {} - -// SetTextOutput is a no-op -func (HandlerFunc) SetTextOutput(output TextOutput) {} - -var ( - defaultHandler = NewTextHandler(StdlibTemplate, - &StdlibOutput{}) -) diff --git a/vendor/github.com/spacemonkeygo/spacelog/level.go b/vendor/github.com/spacemonkeygo/spacelog/level.go deleted file mode 100644 index bf507075..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/level.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spacelog - -import ( - "fmt" - "strconv" - "strings" -) - -type LogLevel int32 - -const ( - Trace LogLevel = 5 - Debug LogLevel = 10 - Info LogLevel = 20 - Notice LogLevel = 30 - Warning LogLevel = 40 - Error LogLevel = 50 - Critical LogLevel = 60 - // syslog has Alert - // syslog has Emerg - - DefaultLevel = Notice -) - -// String returns the log level name in short form -func (l LogLevel) String() string { - switch l.Match() { - case Critical: - return "CRIT" - case Error: - return "ERR" - case Warning: - return "WARN" - case Notice: - return "NOTE" - case Info: - return "INFO" - case Debug: - return "DEBUG" - case Trace: - return "TRACE" - default: - return "UNSET" - } -} - -// String returns the log level name in long human readable form -func (l LogLevel) Name() string { - switch l.Match() { - case Critical: - return "critical" - case Error: - return "error" - case Warning: - return "warning" - case Notice: - return "notice" - case Info: - return "info" - case Debug: - return "debug" - case Trace: - return "trace" - default: - return "unset" - } -} - -// Match returns the greatest named log level that is less than or equal to -// the receiver log level. For example, if the log level is 43, Match() will -// return 40 (Warning) -func (l LogLevel) Match() LogLevel { - if l >= Critical { - return Critical - } - if l >= Error { - return Error - } - if l >= Warning { - return Warning - } - if l >= Notice { - return Notice - } - if l >= Info { - return Info - } - if l >= Debug { - return Debug - } - if l >= Trace { - return Trace - } - return 0 -} - -// LevelFromString will convert a named log level to its corresponding value -// type, or error if both the name was unknown and an integer value was unable -// to be parsed. 
-func LevelFromString(str string) (LogLevel, error) { - switch strings.ToLower(str) { - case "crit", "critical": - return Critical, nil - case "err", "error": - return Error, nil - case "warn", "warning": - return Warning, nil - case "note", "notice": - return Notice, nil - case "info": - return Info, nil - case "debug": - return Debug, nil - case "trace": - return Trace, nil - } - val, err := strconv.ParseInt(str, 10, 32) - if err == nil { - return LogLevel(val), nil - } - return 0, fmt.Errorf("Invalid log level: %s", str) -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/logger.go b/vendor/github.com/spacemonkeygo/spacelog/logger.go deleted file mode 100644 index ae1734b2..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/logger.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "sync" - "sync/atomic" -) - -// Logger is the basic type that allows for logging. A logger has an associated -// name, given to it during construction, either through a logger collection, -// GetLogger, GetLoggerNamed, or another Logger's Scope method. A logger also -// has an associated level and handler, typically configured through the logger -// collection to which it belongs. 
-type Logger struct { - level LogLevel - name string - collection *LoggerCollection - - handler_mtx sync.RWMutex - handler Handler -} - -// Scope returns a new Logger with the same level and handler, using the -// receiver Logger's name as a prefix. -func (l *Logger) Scope(name string) *Logger { - return l.collection.getLogger(l.name+"."+name, l.getLevel(), - l.getHandler()) -} - -func (l *Logger) setLevel(level LogLevel) { - atomic.StoreInt32((*int32)(&l.level), int32(level)) -} - -func (l *Logger) getLevel() LogLevel { - return LogLevel(atomic.LoadInt32((*int32)(&l.level))) -} - -func (l *Logger) setHandler(handler Handler) { - l.handler_mtx.Lock() - defer l.handler_mtx.Unlock() - l.handler = handler -} - -func (l *Logger) getHandler() Handler { - l.handler_mtx.RLock() - defer l.handler_mtx.RUnlock() - return l.handler -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/output.go b/vendor/github.com/spacemonkeygo/spacelog/output.go deleted file mode 100644 index 8751268f..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/output.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spacelog - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "sync" -) - -type TextOutput interface { - Output(LogLevel, []byte) -} - -// WriterOutput is an io.Writer wrapper that matches the TextOutput interface -type WriterOutput struct { - w io.Writer -} - -// NewWriterOutput returns a TextOutput that writes messages to an io.Writer -func NewWriterOutput(w io.Writer) *WriterOutput { - return &WriterOutput{w: w} -} - -func (o *WriterOutput) Output(_ LogLevel, message []byte) { - o.w.Write(append(bytes.TrimRight(message, "\r\n"), platformNewline...)) -} - -// StdlibOutput is a TextOutput that simply writes to the default Go stdlib -// logging system. It is the default. If you configure the Go stdlib to write -// to spacelog, make sure to provide a new TextOutput to your logging -// collection -type StdlibOutput struct{} - -func (*StdlibOutput) Output(_ LogLevel, message []byte) { - log.Print(string(message)) -} - -type bufferMsg struct { - level LogLevel - message []byte -} - -// BufferedOutput uses a channel to synchronize writes to a wrapped TextOutput -// and allows for buffering a limited amount of log events. -type BufferedOutput struct { - o TextOutput - c chan bufferMsg - running sync.Mutex - close_once sync.Once -} - -// NewBufferedOutput returns a BufferedOutput wrapping output with a buffer -// size of buffer. 
-func NewBufferedOutput(output TextOutput, buffer int) *BufferedOutput { - if buffer < 0 { - buffer = 0 - } - b := &BufferedOutput{ - o: output, - c: make(chan bufferMsg, buffer)} - go b.process() - return b -} - -// Close shuts down the BufferedOutput's processing -func (b *BufferedOutput) Close() { - b.close_once.Do(func() { - close(b.c) - }) - b.running.Lock() - b.running.Unlock() -} - -func (b *BufferedOutput) Output(level LogLevel, message []byte) { - b.c <- bufferMsg{level: level, message: message} -} - -func (b *BufferedOutput) process() { - b.running.Lock() - defer b.running.Unlock() - for { - msg, open := <-b.c - if !open { - break - } - b.o.Output(msg.level, msg.message) - } -} - -// A TextOutput object that also implements HupHandlingTextOutput may have its -// OnHup() method called when an administrative signal is sent to this process. -type HupHandlingTextOutput interface { - TextOutput - OnHup() -} - -// FileWriterOutput is like WriterOutput with a plain file handle, but it -// knows how to reopen the file (or try to reopen it) if it hasn't been able -// to open the file previously, or if an appropriate signal has been received. -type FileWriterOutput struct { - *WriterOutput - path string -} - -// Creates a new FileWriterOutput object. This is the only case where an -// error opening the file will be reported to the caller; if we try to -// reopen it later and the reopen fails, we'll just keep trying until it -// works. -func NewFileWriterOutput(path string) (*FileWriterOutput, error) { - fo := &FileWriterOutput{path: path} - fh, err := fo.openFile() - if err != nil { - return nil, err - } - fo.WriterOutput = NewWriterOutput(fh) - return fo, nil -} - -// Try to open the file with the path associated with this object. -func (fo *FileWriterOutput) openFile() (*os.File, error) { - return os.OpenFile(fo.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644) -} - -// Try to communicate a message without using our log file. 
In all likelihood, -// stderr is closed or redirected to /dev/null, but at least we can try -// writing there. In the very worst case, if an admin attaches a ptrace to -// this process, it will be more clear what the problem is. -func (fo *FileWriterOutput) fallbackLog(tmpl string, args ...interface{}) { - fmt.Fprintf(os.Stderr, tmpl, args...) -} - -// Output a log line by writing it to the file. If the file has been -// released, try to open it again. If that fails, cry for a little -// while, then throw away the message and carry on. -func (fo *FileWriterOutput) Output(ll LogLevel, message []byte) { - if fo.WriterOutput == nil { - fh, err := fo.openFile() - if err != nil { - fo.fallbackLog("Could not open %#v: %s", fo.path, err) - return - } - fo.WriterOutput = NewWriterOutput(fh) - } - fo.WriterOutput.Output(ll, message) -} - -// Throw away any references/handles to the output file. This probably -// means the admin wants to rotate the file out and have this process -// open a new one. Close the underlying io.Writer if that is a thing -// that it knows how to do. -func (fo *FileWriterOutput) OnHup() { - if fo.WriterOutput != nil { - wc, ok := fo.WriterOutput.w.(io.Closer) - if ok { - err := wc.Close() - if err != nil { - fo.fallbackLog("Closing %#v failed: %s", fo.path, err) - } - } - fo.WriterOutput = nil - } -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/output_other.go b/vendor/github.com/spacemonkeygo/spacelog/output_other.go deleted file mode 100644 index 2be240a1..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/output_other.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package spacelog - -var platformNewline = []byte("\n") diff --git a/vendor/github.com/spacemonkeygo/spacelog/output_windows.go b/vendor/github.com/spacemonkeygo/spacelog/output_windows.go deleted file mode 100644 index 58b71dab..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/output_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -var platformNewline = []byte("\r\n") diff --git a/vendor/github.com/spacemonkeygo/spacelog/setup.go b/vendor/github.com/spacemonkeygo/spacelog/setup.go deleted file mode 100644 index 2c1cbcee..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/setup.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "bytes" - "fmt" - "log" - "math" - "os" - "os/signal" - "regexp" - "strings" - "text/template" -) - -// SetupConfig is a configuration struct meant to be used with -// github.com/spacemonkeygo/flagfile/utils.Setup -// but can be used independently. -type SetupConfig struct { - Output string `default:"stderr" usage:"log output. can be stdout, stderr, syslog, or a path"` - Level string `default:"" usage:"base logger level"` - Filter string `default:"" usage:"sets loggers matching this regular expression to the lowest level"` - Format string `default:"" usage:"format string to use"` - Stdlevel string `default:"warn" usage:"logger level for stdlib log integration"` - Subproc string `default:"" usage:"process to run for stdout/stderr-captured logging. The command is first processed as a Go template that supports {{.Facility}}, {{.Level}}, and {{.Name}} fields, and then passed to sh. If set, will redirect stdout and stderr to the given process. A good default is 'setsid logger --priority {{.Facility}}.{{.Level}} --tag {{.Name}}'"` - Buffer int `default:"0" usage:"the number of messages to buffer. 
0 for no buffer"` - // Facility defaults to syslog.LOG_USER (which is 8) - Facility int `default:"8" usage:"the syslog facility to use if syslog output is configured"` - HupRotate bool `default:"false" usage:"if true, sending a HUP signal will reopen log files"` - Config string `default:"" usage:"a semicolon separated list of logger=level; sets each log to the corresponding level"` -} - -var ( - stdlog = GetLoggerNamed("stdlog") - funcmap = template.FuncMap{"ColorizeLevel": ColorizeLevel} -) - -// SetFormatMethod adds functions to the template function map, such that -// command-line and Setup provided templates can call methods added to the map -// via this method. The map comes prepopulated with ColorizeLevel, but can be -// overridden. SetFormatMethod should be called (if at all) before one of -// this package's Setup methods. -func SetFormatMethod(name string, fn interface{}) { - funcmap[name] = fn -} - -// MustSetup is the same as Setup, but panics instead of returning an error -func MustSetup(procname string, config SetupConfig) { - err := Setup(procname, config) - if err != nil { - panic(err) - } -} - -type subprocInfo struct { - Facility string - Level string - Name string -} - -// Setup takes a given procname and sets spacelog up with the given -// configuration. Setup supports: -// * capturing stdout and stderr to a subprocess -// * configuring the default level -// * configuring log filters (enabling only some loggers) -// * configuring the logging template -// * configuring the output (a file, syslog, stdout, stderr) -// * configuring log event buffering -// * capturing all standard library logging with configurable log level -// It is expected that this method will be called once at process start. 
-func Setup(procname string, config SetupConfig) error { - if config.Subproc != "" { - t, err := template.New("subproc").Parse(config.Subproc) - if err != nil { - return err - } - var buf bytes.Buffer - err = t.Execute(&buf, &subprocInfo{ - Facility: fmt.Sprintf("%d", config.Facility), - Level: fmt.Sprintf("%d", 2), // syslog.LOG_CRIT - Name: procname}) - if err != nil { - return err - } - err = CaptureOutputToProcess("sh", "-c", string(buf.Bytes())) - if err != nil { - return err - } - } - if config.Config != "" { - err := ConfigureLoggers(config.Config) - if err != nil { - return err - } - } - if config.Level != "" { - level_val, err := LevelFromString(config.Level) - if err != nil { - return err - } - if level_val != DefaultLevel { - SetLevel(nil, level_val) - } - } - if config.Filter != "" { - re, err := regexp.Compile(config.Filter) - if err != nil { - return err - } - SetLevel(re, LogLevel(math.MinInt32)) - } - var t *template.Template - if config.Format != "" { - var err error - t, err = template.New("user").Funcs(funcmap).Parse(config.Format) - if err != nil { - return err - } - } - var textout TextOutput - switch strings.ToLower(config.Output) { - case "syslog": - w, err := NewSyslogOutput(SyslogPriority(config.Facility), procname) - if err != nil { - return err - } - if t == nil { - t = SyslogTemplate - } - textout = w - case "stdout": - if t == nil { - t = DefaultTemplate - } - textout = NewWriterOutput(os.Stdout) - case "stderr", "": - if t == nil { - t = DefaultTemplate - } - textout = NewWriterOutput(os.Stderr) - default: - if t == nil { - t = StandardTemplate - } - var err error - textout, err = NewFileWriterOutput(config.Output) - if err != nil { - return err - } - } - if config.HupRotate { - if hh, ok := textout.(HupHandlingTextOutput); ok { - sigchan := make(chan os.Signal) - signal.Notify(sigchan, sigHUP) - go func() { - for _ = range sigchan { - hh.OnHup() - } - }() - } - } - if config.Buffer > 0 { - textout = NewBufferedOutput(textout, 
config.Buffer) - } - SetHandler(nil, NewTextHandler(t, textout)) - log.SetFlags(log.Lshortfile) - if config.Stdlevel == "" { - config.Stdlevel = "warn" - } - stdlog_level_val, err := LevelFromString(config.Stdlevel) - if err != nil { - return err - } - log.SetOutput(stdlog.WriterWithoutCaller(stdlog_level_val)) - return nil -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go b/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go deleted file mode 100644 index c12ed961..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (C) 2017 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build appengine - -package spacelog - -import ( - "strconv" -) - -const ( - sigHUP = syscallSignal(0x1) -) - -type syscallSignal int - -func (s syscallSignal) Signal() {} - -func (s syscallSignal) String() string { - switch s { - case sigHUP: - return "hangup" - } - return "signal " + strconv.Itoa(int(s)) -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go b/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go deleted file mode 100644 index 0e033a8d..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) 2017 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !appengine - -package spacelog - -import "syscall" - -const ( - sigHUP = syscall.SIGHUP -) diff --git a/vendor/github.com/spacemonkeygo/spacelog/syslog.go b/vendor/github.com/spacemonkeygo/spacelog/syslog.go deleted file mode 100644 index c2317b6c..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/syslog.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package spacelog - -import ( - "bytes" - "log/syslog" -) - -type SyslogPriority syslog.Priority - -// SyslogOutput is a syslog client that matches the TextOutput interface -type SyslogOutput struct { - w *syslog.Writer -} - -// NewSyslogOutput returns a TextOutput object that writes to syslog using -// the given facility and tag. The log level will be determined by the log -// event. 
-func NewSyslogOutput(facility SyslogPriority, tag string) ( - TextOutput, error) { - w, err := syslog.New(syslog.Priority(facility), tag) - if err != nil { - return nil, err - } - return &SyslogOutput{w: w}, nil -} - -func (o *SyslogOutput) Output(level LogLevel, message []byte) { - level = level.Match() - for _, msg := range bytes.Split(message, []byte{'\n'}) { - switch level { - case Critical: - o.w.Crit(string(msg)) - case Error: - o.w.Err(string(msg)) - case Warning: - o.w.Warning(string(msg)) - case Notice: - o.w.Notice(string(msg)) - case Info: - o.w.Info(string(msg)) - case Debug: - fallthrough - case Trace: - fallthrough - default: - o.w.Debug(string(msg)) - } - } -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go b/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go deleted file mode 100644 index edba3c2a..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spacelog - -import ( - "fmt" -) - -type SyslogPriority int - -func NewSyslogOutput(facility SyslogPriority, tag string) ( - TextOutput, error) { - return nil, fmt.Errorf("SyslogOutput not supported on Windows") -} diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates.go b/vendor/github.com/spacemonkeygo/spacelog/templates.go deleted file mode 100644 index 959033da..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/templates.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "text/template" -) - -// ColorizeLevel returns a TermColor byte sequence for the appropriate color -// for the level. If you'd like to configure your own color choices, you can -// make your own template with its own function map to your own colorize -// function. -func ColorizeLevel(level LogLevel) string { - switch level.Match() { - case Critical, Error: - return TermColors{}.Red() - case Warning: - return TermColors{}.Magenta() - case Notice: - return TermColors{}.Yellow() - case Info, Debug, Trace: - return TermColors{}.Green() - } - return "" -} - -var ( - // ColorTemplate uses the default ColorizeLevel method for color choices. 
- ColorTemplate = template.Must(template.New("color").Funcs(template.FuncMap{ - "ColorizeLevel": ColorizeLevel}).Parse( - `{{.Blue}}{{.Date}} {{.Time}}{{.Reset}} ` + - `{{.Bold}}{{ColorizeLevel .Level}}{{.LevelJustified}}{{.Reset}} ` + - `{{.Underline}}{{.LoggerName}}{{.Reset}} ` + - `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}- ` + - `{{ColorizeLevel .Level}}{{.Message}}{{.Reset}}`)) - - // StandardTemplate is like ColorTemplate with no color. - StandardTemplate = template.Must(template.New("standard").Parse( - `{{.Date}} {{.Time}} ` + - `{{.Level}} {{.LoggerName}} ` + - `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` + - `- {{.Message}}`)) - - // SyslogTemplate is missing the date and time as syslog adds those - // things. - SyslogTemplate = template.Must(template.New("syslog").Parse( - `{{.Level}} {{.LoggerName}} ` + - `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` + - `- {{.Message}}`)) - - // StdlibTemplate is missing the date and time as the stdlib logger often - // adds those things. - StdlibTemplate = template.Must(template.New("stdlib").Parse( - `{{.Level}} {{.LoggerName}} ` + - `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` + - `- {{.Message}}`)) -) diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates_others.go b/vendor/github.com/spacemonkeygo/spacelog/templates_others.go deleted file mode 100644 index 114e2e14..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/templates_others.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package spacelog - -var ( - // DefaultTemplate is default template for stdout/stderr for the platform - DefaultTemplate = ColorTemplate -) diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go b/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go deleted file mode 100644 index 512b6004..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -var ( - // DefaultTemplate is default template for stdout/stderr for the platform - DefaultTemplate = StandardTemplate -) diff --git a/vendor/github.com/spacemonkeygo/spacelog/text.go b/vendor/github.com/spacemonkeygo/spacelog/text.go deleted file mode 100644 index 8b36ce99..00000000 --- a/vendor/github.com/spacemonkeygo/spacelog/text.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) 2014 Space Monkey, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spacelog - -import ( - "bytes" - "fmt" - "runtime" - "strings" - "sync" - "text/template" - "time" -) - -// TextHandler is the default implementation of the Handler interface. A -// TextHandler, on log events, makes LogEvent structures, passes them to the -// configured template, and then passes that output to a configured TextOutput -// interface. -type TextHandler struct { - mtx sync.RWMutex - template *template.Template - output TextOutput -} - -// NewTextHandler creates a Handler that creates LogEvents, passes them to -// the given template, and passes the result to output -func NewTextHandler(t *template.Template, output TextOutput) *TextHandler { - return &TextHandler{template: t, output: output} -} - -// Log makes a LogEvent, formats it with the configured template, then passes -// the output to configured output sink -func (h *TextHandler) Log(logger_name string, level LogLevel, msg string, - calldepth int) { - h.mtx.RLock() - output, template := h.output, h.template - h.mtx.RUnlock() - event := LogEvent{ - LoggerName: logger_name, - Level: level, - Message: strings.TrimRight(msg, "\n\r"), - Timestamp: time.Now()} - if calldepth >= 0 { - _, event.Filepath, event.Line, _ = runtime.Caller(calldepth + 1) - } - var buf bytes.Buffer - err := template.Execute(&buf, &event) - if err != nil { - output.Output(level, []byte( - fmt.Sprintf("log format template failed: %s", err))) - return - } - output.Output(level, buf.Bytes()) -} - -// SetTextTemplate changes the TextHandler's text formatting template -func (h *TextHandler) 
SetTextTemplate(t *template.Template) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.template = t -} - -// SetTextOutput changes the TextHandler's TextOutput sink -func (h *TextHandler) SetTextOutput(output TextOutput) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.output = output -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index fa1245b1..2924cf3a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -8,7 +8,6 @@ import ( "fmt" "math" "os" - "path/filepath" "reflect" "regexp" "runtime" @@ -141,12 +140,11 @@ func CallerInfo() []string { } parts := strings.Split(file, "/") - file = parts[len(parts)-1] if len(parts) > 1 { + filename := parts[len(parts)-1] dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - path, _ := filepath.Abs(file) - callers = append(callers, fmt.Sprintf("%s:%d", path, line)) + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } } @@ -530,7 +528,7 @@ func isNil(object interface{}) bool { []reflect.Kind{ reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, kind) if isNilableKind && value.IsNil() { @@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true // we consider nil to be equal to the nil set } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) 
} + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...) + if !av.IsValid() { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...) } } return true } - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) + return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...) 
} } @@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...) } - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) } + subsetKind := reflect.TypeOf(subset).Kind() if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } - subsetValue := reflect.ValueOf(subset) if subsetKind == reflect.Map && listKind == reflect.Map { - listValue := reflect.ValueOf(list) - subsetKeys := subsetValue.MapKeys() + subsetMap := reflect.ValueOf(subset) + actualMap := reflect.ValueOf(list) - for i := 0; i < len(subsetKeys); i++ { - subsetKey := subsetKeys[i] - subsetElement := subsetValue.MapIndex(subsetKey).Interface() - listElement := listValue.MapIndex(subsetKey).Interface() + for _, k := range subsetMap.MapKeys() { + ev := subsetMap.MapIndex(k) + av := actualMap.MapIndex(k) - if !ObjectsAreEqual(subsetElement, listElement) { + if !av.IsValid() { + return true + } + if !ObjectsAreEqual(ev.Interface(), av.Interface()) { return true } } @@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) 
} - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() + subsetList := reflect.ValueOf(subset) + for i := 0; i < subsetList.Len(); i++ { + element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) diff --git a/vendor/go.uber.org/fx/CHANGELOG.md b/vendor/go.uber.org/fx/CHANGELOG.md index 208cb39e..9f58ac17 100644 --- a/vendor/go.uber.org/fx/CHANGELOG.md +++ b/vendor/go.uber.org/fx/CHANGELOG.md @@ -10,6 +10,11 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.19.2](https://github.com/uber-go/fx/compare/v1.19.1...v1.19.2) - 2023-02-21 +### Changed +- Upgrade Dig dependency to v1.16.1. + + ## [1.19.1](https://github.com/uber-go/fx/compare/v1.18.0...v1.19.1) - 2023-01-10 ### Changed - Calling `fx.Stop()` after the `App` has already stopped no longer errors out. diff --git a/vendor/go.uber.org/fx/version.go b/vendor/go.uber.org/fx/version.go index 486815e6..971d8b95 100644 --- a/vendor/go.uber.org/fx/version.go +++ b/vendor/go.uber.org/fx/version.go @@ -21,4 +21,4 @@ package fx // Version is exported for runtime compatibility checks. -const Version = "1.19.1" +const Version = "1.19.2" diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md index d2c8aada..f8177b97 100644 --- a/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,21 @@ Releases ======== +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements multiple-error + interface. +- Add `Every` function to allow checking if all errors in the chain + satisfies `errors.Is` against the target error. 
+ +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. + v1.9.0 (2022-12-12) =================== diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md index 70aacecd..5ab6ac40 100644 --- a/vendor/go.uber.org/multierr/README.md +++ b/vendor/go.uber.org/multierr/README.md @@ -2,9 +2,29 @@ `multierr` allows combining one or more Go `error`s together. +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. +- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + ## Installation - go get -u go.uber.org/multierr +```bash +go get -u go.uber.org/multierr@latest +``` ## Status diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go index cdd91ae5..3a828b2d 100644 --- a/vendor/go.uber.org/multierr/error.go +++ b/vendor/go.uber.org/multierr/error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017-2021 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -147,8 +147,7 @@ import ( "io" "strings" "sync" - - "go.uber.org/atomic" + "sync/atomic" ) var ( @@ -196,23 +195,7 @@ type errorGroup interface { // // Callers of this function are free to modify the returned slice. func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - return append(([]error)(nil), eg.Errors()...) + return extractErrors(err) } // multiError is an error that holds one or more errors. @@ -227,8 +210,6 @@ type multiError struct { errors []error } -var _ errorGroup = (*multiError)(nil) - // Errors returns the list of underlying errors. // // This slice MUST NOT be modified. @@ -239,33 +220,6 @@ func (merr *multiError) Errors() []error { return merr.errors } -// As attempts to find the first error in the error list that matches the type -// of the value that target points to. -// -// This function allows errors.As to traverse the values stored on the -// multierr error. -func (merr *multiError) As(target interface{}) bool { - for _, err := range merr.Errors() { - if errors.As(err, target) { - return true - } - } - return false -} - -// Is attempts to match the provided error against errors in the error list. -// -// This function allows errors.Is to traverse the values stored on the -// multierr error. 
-func (merr *multiError) Is(target error) bool { - for _, err := range merr.Errors() { - if errors.Is(err, target) { - return true - } - } - return false -} - func (merr *multiError) Error() string { if merr == nil { return "" @@ -281,6 +235,17 @@ func (merr *multiError) Error() string { return result } +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + func (merr *multiError) Format(f fmt.State, c rune) { if c == 'v' && f.Flag('+') { merr.writeMultiline(f) diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go new file mode 100644 index 00000000..a173f9c2 --- /dev/null +++ b/vendor/go.uber.org/multierr/error_post_go120.go @@ -0,0 +1,48 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} + +type multipleErrors interface { + Unwrap() []error +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // check if the given err is an Unwrapable error that + // implements multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) +} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go new file mode 100644 index 00000000..93872a3f --- /dev/null +++ b/vendor/go.uber.org/multierr/error_pre_go120.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) 
+} diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084ec..00000000 --- a/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 00000000..cff0cd49 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,258 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. 
If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. 
+func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. 
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. 
+func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 00000000..f14f40da --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,126 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. 
+func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . 
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 00000000..2a632476 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. 
+ if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. 
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. 
+ for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 00000000..efaa1c8b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. 
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]

=p for inewpivot. +// On return, data[newpivot] = p +func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (data[i] < data[a]) { + i++ + } + for i <= j && !(data[j] < data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(data[a] < data[i]) { + i++ + } + for i <= j && (data[a] < data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. 
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(data[i] < data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(data[j] < data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsOrdered scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotOrdered chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. +func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. 
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. 
+// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfd..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. 
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c3869..b4723fca 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -19,8 +19,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vendor/golang.org/x/tools/cmd/goimports/doc.go new file mode 100644 index 00000000..18a3ad44 --- /dev/null +++ 
b/vendor/golang.org/x/tools/cmd/goimports/doc.go @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Command goimports updates your Go import lines, +adding missing ones and removing unreferenced ones. + + $ go install golang.org/x/tools/cmd/goimports@latest + +In addition to fixing imports, goimports also formats +your code in the same style as gofmt so it can be used +as a replacement for your editor's gofmt-on-save hook. + +For emacs, make sure you have the latest go-mode.el: + + https://github.com/dominikh/go-mode.el + +Then in your .emacs file: + + (setq gofmt-command "goimports") + (add-hook 'before-save-hook 'gofmt-before-save) + +For vim, set "gofmt_command" to "goimports": + + https://golang.org/change/39c724dd7f252 + https://golang.org/wiki/IDEsAndTextEditorPlugins + etc + +For GoSublime, follow the steps described here: + + http://michaelwhatcott.com/gosublime-goimports/ + +For other editors, you probably know what to do. + +To exclude directories in your $GOPATH from being scanned for Go +files, goimports respects a configuration file at +$GOPATH/src/.goimportsignore which may contain blank lines, comment +lines (beginning with '#'), or lines naming a directory relative to +the configuration file to ignore when scanning. No globbing or regex +patterns are allowed. Use the "-v" verbose flag to verify it's +working and see what goimports is doing. + +File bugs or feature requests at: + + https://golang.org/issues/new?title=x/tools/cmd/goimports:+ + +Happy hacking! +*/ +package main // import "golang.org/x/tools/cmd/goimports" diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go new file mode 100644 index 00000000..b354c9e8 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -0,0 +1,380 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/scanner" + exec "golang.org/x/sys/execabs" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" +) + +var ( + // main operation modes + list = flag.Bool("l", false, "list files whose formatting differs from goimport's") + write = flag.Bool("w", false, "write result to (source) file instead of stdout") + doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") + srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. When operating on a single file, dir may instead be the complete file name.") + + verbose bool // verbose logging + + cpuProfile = flag.String("cpuprofile", "", "CPU profile output") + memProfile = flag.String("memprofile", "", "memory profile output") + memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate") + + options = &imports.Options{ + TabWidth: 8, + TabIndent: true, + Comments: true, + Fragment: true, + Env: &imports.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + } + exitCode = 0 +) + +func init() { + flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") + flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") + flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. 
In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.") +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(2) +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +// argumentType is which mode goimports was invoked as. +type argumentType int + +const ( + // fromStdin means the user is piping their source into goimports. + fromStdin argumentType = iota + + // singleArg is the common case from editors, when goimports is run on + // a single file. + singleArg + + // multipleArg is when the user ran "goimports file1.go file2.go" + // or ran goimports on a directory tree. + multipleArg +) + +func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error { + opt := options + if argType == fromStdin { + nopt := *options + nopt.Fragment = true + opt = &nopt + } + + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + target := filename + if *srcdir != "" { + // Determine whether the provided -srcdirc is a directory or file + // and then use it to override the target. + // + // See https://github.com/dominikh/go-mode.el/issues/146 + if isFile(*srcdir) { + if argType == multipleArg { + return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories") + } + target = *srcdir + } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) { + // For a file which doesn't exist on disk yet, but might shortly. + // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk. 
+ // The goimports on-save hook writes the buffer to a temp file + // first and runs goimports before the actual save to newfile.go. + // The editor's buffer is named "newfile.go" so that is passed to goimports as: + // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go + // and then the editor reloads the result from the tmp file and writes + // it to newfile.go. + target = *srcdir + } else { + // Pretend that file is from *srcdir in order to decide + // visible imports correctly. + target = filepath.Join(*srcdir, filepath.Base(filename)) + } + } + + res, err := imports.Process(target, src, opt) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(out, filename) + } + if *write { + if argType == fromStdin { + // filename is "" + return errors.New("can't use -w on stdin") + } + // On Windows, we need to re-set the permissions from the file. See golang/go#38225. + var perms os.FileMode + if fi, err := os.Stat(filename); err == nil { + perms = fi.Mode() & os.ModePerm + } + err = ioutil.WriteFile(filename, res, perms) + if err != nil { + return err + } + } + if *doDiff { + if argType == fromStdin { + filename = "stdin.go" // because .orig looks silly + } + data, err := diff(src, res, filename) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + out.Write(data) + } + } + + if !*list && !*write && !*doDiff { + _, err = out.Write(res) + } + + return err +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, nil, os.Stdout, multipleArg) + } + if err != nil { + report(err) + } + return nil +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU()) + + // call gofmtMain in a separate function + // so that it can use defer and have them + // 
run before the exit. + gofmtMain() + os.Exit(exitCode) +} + +// parseFlags parses command line flags and returns the paths to process. +// It's a var so that custom implementations can replace it in other files. +var parseFlags = func() []string { + flag.BoolVar(&verbose, "v", false, "verbose logging") + + flag.Parse() + return flag.Args() +} + +func bufferedFileWriter(dest string) (w io.Writer, close func()) { + f, err := os.Create(dest) + if err != nil { + log.Fatal(err) + } + bw := bufio.NewWriter(f) + return bw, func() { + if err := bw.Flush(); err != nil { + log.Fatalf("error flushing %v: %v", dest, err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } + } +} + +func gofmtMain() { + flag.Usage = usage + paths := parseFlags() + + if *cpuProfile != "" { + bw, flush := bufferedFileWriter(*cpuProfile) + pprof.StartCPUProfile(bw) + defer flush() + defer pprof.StopCPUProfile() + } + // doTrace is a conditionally compiled wrapper around runtime/trace. It is + // used to allow goimports to compile under gccgo, which does not support + // runtime/trace. See https://golang.org/issue/15544. 
+ defer doTrace()() + if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + bw, flush := bufferedFileWriter(*memProfile) + defer func() { + runtime.GC() // materialize all statistics + if err := pprof.WriteHeapProfile(bw); err != nil { + log.Fatal(err) + } + flush() + }() + } + + if verbose { + log.SetFlags(log.LstdFlags | log.Lmicroseconds) + options.Env.Logf = log.Printf + } + if options.TabWidth < 0 { + fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth) + exitCode = 2 + return + } + + if len(paths) == 0 { + if err := processFile("", os.Stdin, os.Stdout, fromStdin); err != nil { + report(err) + } + return + } + + argType := singleArg + if len(paths) > 1 { + argType = multipleArg + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, nil, os.Stdout, argType); err != nil { + report(err) + } + } + } +} + +func writeTempFile(dir, prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} + +func diff(b1, b2 []byte, filename string) (data []byte, err error) { + f1, err := writeTempFile("", "gofmt", b1) + if err != nil { + return + } + defer os.Remove(f1) + + f2, err := writeTempFile("", "gofmt", b2) + if err != nil { + return + } + defer os.Remove(f2) + + cmd := "diff" + if runtime.GOOS == "plan9" { + cmd = "/bin/ape/diff" + } + + data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + return replaceTempFilename(data, filename) + } + return +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. 
+// +// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +// isFile reports whether name is a file. +func isFile(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.Mode().IsRegular() +} + +// isDir reports whether name is a directory. +func isDir(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.IsDir() +} diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go new file mode 100644 index 00000000..190a5653 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go @@ -0,0 +1,27 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc +// +build gc + +package main + +import ( + "flag" + "runtime/trace" +) + +var traceProfile = flag.String("trace", "", "trace profile output") + +func doTrace() func() { + if *traceProfile != "" { + bw, flush := bufferedFileWriter(*traceProfile) + trace.Start(bw) + return func() { + flush() + trace.Stop() + } + } + return func() {} +} diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go new file mode 100644 index 00000000..344fe757 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go @@ -0,0 +1,12 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !gc +// +build !gc + +package main + +func doTrace() func() { + return func() {} +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 00000000..be8f5a86 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,762 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. 
In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/typeparams" + + _ "unsafe" // for go:linkname +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. 
+// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. 
+// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func For(obj types.Object) (Path, error) { + return newEncoderFor()(obj) +} + +// An encoder amortizes the cost of encoding the paths of multiple objects. +// Nonexported pending approval of proposal 58668. +type encoder struct { + scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names() + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() +} + +// Exposed to gopls via golang.org/x/tools/internal/typesinternal +// pending approval of proposal 58668. +// +//go:linkname newEncoderFor +func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For } + +func (enc *encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. 
accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. 
+ // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + names := enc.scopeNames(scope) + for _, name := range names { + o := scope.Lookup(name) + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, name...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, name := range names { + o := scope.Lookup(name) + path := append(empty, name...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. 
+ for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. 
Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. 
+ name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named)) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // 
found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *typeparams.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + if p == "" { + return nil, fmt.Errorf("empty path") + } + + pathstr := string(p) + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." 
+ } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *typeparams.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' (the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. 
+ if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*typeparams.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot 
apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + methods := namedMethods(t) // (unmemoized) + if index >= len(methods) { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods)) + } + obj = methods[index] // Id-ordered + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { + methods := make([]*types.Func, named.NumMethods()) + for i := range methods { + methods[i] = named.Method(i) + } + sort.Slice(methods, func(i, j int) bool { + return methods[i].Id() < methods[j].Id() + }) + return methods +} + +// scopeNames is a memoization of scope.Names. Callers must not modify the result. 
+func (enc *encoder) scopeNames(scope *types.Scope) []string { + m := enc.scopeNamesMemo + if m == nil { + m = make(map[*types.Scope][]string) + enc.scopeNamesMemo = m + } + names, ok := m[scope] + if !ok { + names = scope.Names() // allocates and sorts + m[scope] = names + } + return names +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m + } + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods + } + return methods + +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a..a973dece 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. 
package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11c..34fc783f 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -12,6 +12,7 @@ package gcimporter import ( "go/token" "go/types" + "sort" "strings" "golang.org/x/tools/internal/pkgbits" @@ -121,6 +122,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st iface.Complete() } + // Imports() of pkg are all of the transitive packages that were loaded. + var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + pkg.MarkComplete() return pkg } @@ -260,39 +271,9 @@ func (r *reader) doPkg() *types.Package { pkg := types.NewPackage(path, name) r.p.imports[path] = pkg - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(flattenImports(imports)) - return pkg } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { - var res []*types.Package - seen := make(map[*types.Package]struct{}) - for _, pkg := range pkgs { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - - // pkg.Imports() is already flattened. 
- for _, pkg := range pkg.Imports() { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - } - } - return res -} - // @@@ Types func (r *reader) typ() types.Type { diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d..cfba8189 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -87,7 +87,6 @@ func IsTypeParam(t types.Type) bool { func OriginMethod(fn *types.Func) *types.Func { recv := fn.Type().(*types.Signature).Recv() if recv == nil { - return fn } base := recv.Type() diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index ce7d4351..3c53fbc6 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/go/types/objectpath" ) func SetUsesCgo(conf *types.Config) bool { @@ -50,3 +52,10 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, } var SetGoVersion = func(conf *types.Config, version string) bool { return false } + +// NewObjectpathEncoder returns a function closure equivalent to +// objectpath.For but amortized for multiple (sequential) calls. +// It is a temporary workaround, pending the approval of proposal 58668. 
+// +//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor +func NewObjectpathFunc() func(types.Object) (objectpath.Path, error) diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go new file mode 100644 index 00000000..369df13d --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/encoding/protowire" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type fileInfo struct { + *protogen.File + + allEnums []*enumInfo + allMessages []*messageInfo + allExtensions []*extensionInfo + + allEnumsByPtr map[*enumInfo]int // value is index into allEnums + allMessagesByPtr map[*messageInfo]int // value is index into allMessages + allMessageFieldsByPtr map[*messageInfo]*structFields + + // needRawDesc specifies whether the generator should emit logic to provide + // the legacy raw descriptor in GZIP'd form. + // This is updated by enum and message generation logic as necessary, + // and checked at the end of file generation. + needRawDesc bool +} + +type structFields struct { + count int + unexported map[int]string +} + +func (sf *structFields) append(name string) { + if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) { + if sf.unexported == nil { + sf.unexported = make(map[int]string) + } + sf.unexported[sf.count] = name + } + sf.count++ +} + +func newFileInfo(file *protogen.File) *fileInfo { + f := &fileInfo{File: file} + + // Collect all enums, messages, and extensions in "flattened ordering". + // See filetype.TypeBuilder. 
+ var walkMessages func([]*protogen.Message, func(*protogen.Message)) + walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) { + for _, m := range messages { + f(m) + walkMessages(m.Messages, f) + } + } + initEnumInfos := func(enums []*protogen.Enum) { + for _, enum := range enums { + f.allEnums = append(f.allEnums, newEnumInfo(f, enum)) + } + } + initMessageInfos := func(messages []*protogen.Message) { + for _, message := range messages { + f.allMessages = append(f.allMessages, newMessageInfo(f, message)) + } + } + initExtensionInfos := func(extensions []*protogen.Extension) { + for _, extension := range extensions { + f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension)) + } + } + initEnumInfos(f.Enums) + initMessageInfos(f.Messages) + initExtensionInfos(f.Extensions) + walkMessages(f.Messages, func(m *protogen.Message) { + initEnumInfos(m.Enums) + initMessageInfos(m.Messages) + initExtensionInfos(m.Extensions) + }) + + // Derive a reverse mapping of enum and message pointers to their index + // in allEnums and allMessages. 
+ if len(f.allEnums) > 0 { + f.allEnumsByPtr = make(map[*enumInfo]int) + for i, e := range f.allEnums { + f.allEnumsByPtr[e] = i + } + } + if len(f.allMessages) > 0 { + f.allMessagesByPtr = make(map[*messageInfo]int) + f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields) + for i, m := range f.allMessages { + f.allMessagesByPtr[m] = i + f.allMessageFieldsByPtr[m] = new(structFields) + } + } + + return f +} + +type enumInfo struct { + *protogen.Enum + + genJSONMethod bool + genRawDescMethod bool +} + +func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo { + e := &enumInfo{Enum: enum} + e.genJSONMethod = true + e.genRawDescMethod = true + return e +} + +type messageInfo struct { + *protogen.Message + + genRawDescMethod bool + genExtRangeMethod bool + + isTracked bool + hasWeak bool +} + +func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo { + m := &messageInfo{Message: message} + m.genRawDescMethod = true + m.genExtRangeMethod = true + m.isTracked = isTrackedMessage(m) + for _, field := range m.Fields { + m.hasWeak = m.hasWeak || field.Desc.IsWeak() + } + return m +} + +// isTrackedMessage reports whether field tracking is enabled on the message. +func isTrackedMessage(m *messageInfo) (tracked bool) { + const trackFieldUse_fieldNumber = 37383685 + + // Decode the option from unknown fields to avoid a dependency on the + // annotation proto from protoc-gen-go. 
+ b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown() + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + if num == trackFieldUse_fieldNumber && typ == protowire.VarintType { + v, _ := protowire.ConsumeVarint(b) + tracked = protowire.DecodeBool(v) + } + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + return tracked +} + +type extensionInfo struct { + *protogen.Extension +} + +func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo { + x := &extensionInfo{Extension: extension} + return x +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go new file mode 100644 index 00000000..f8b76bf5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -0,0 +1,896 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal_gengo is internal to the protobuf module. +package internal_gengo + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/version" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +// SupportedFeatures reports the set of supported protobuf language features. +var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + +// GenerateVersionMarkers specifies whether to generate version markers. +var GenerateVersionMarkers = true + +// Standard library dependencies. 
+const ( + base64Package = protogen.GoImportPath("encoding/base64") + mathPackage = protogen.GoImportPath("math") + reflectPackage = protogen.GoImportPath("reflect") + sortPackage = protogen.GoImportPath("sort") + stringsPackage = protogen.GoImportPath("strings") + syncPackage = protogen.GoImportPath("sync") + timePackage = protogen.GoImportPath("time") + utf8Package = protogen.GoImportPath("unicode/utf8") +) + +// Protobuf library dependencies. +// +// These are declared as an interface type so that they can be more easily +// patched to support unique build environments that impose restrictions +// on the dependencies of generated source code. +var ( + protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") + protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") + protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") + protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson") + protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") + protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry") +) + +type goImportPath interface { + String() string + Ident(string) protogen.GoIdent +} + +// GenerateFile generates the contents of a .pb.go file. 
+func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + filename := file.GeneratedFilenamePrefix + ".pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + f := newFileInfo(file) + + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number)) + genGeneratedHeader(gen, g, f) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number)) + + packageDoc := genPackageKnownComment(f) + g.P(packageDoc, "package ", f.GoPackageName) + g.P() + + // Emit a static check that enforces a minimum version of the proto package. + if GenerateVersionMarkers { + g.P("const (") + g.P("// Verify that this generated code is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")") + g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")") + g.P(")") + g.P() + } + + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + genImport(gen, g, f, imps.Get(i)) + } + for _, enum := range f.allEnums { + genEnum(g, f, enum) + } + for _, message := range f.allMessages { + genMessage(g, f, message) + } + genExtensions(g, f) + + genReflectFileDescriptor(gen, g, f) + + return g +} + +// genStandaloneComments prints all leading comments for a FileDescriptorProto +// location identified by the field number n. +func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) { + loc := f.Desc.SourceLocations().ByPath(protoreflect.SourcePath{n}) + for _, s := range loc.LeadingDetachedComments { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.LeadingComments; s != "" { + g.P(protogen.Comments(s)) + g.P() + } +} + +func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("// Code generated by protoc-gen-go. 
DO NOT EDIT.") + + if GenerateVersionMarkers { + g.P("// versions:") + protocGenGoVersion := version.String() + protocVersion := "(unknown)" + if v := gen.Request.GetCompilerVersion(); v != nil { + protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + if s := v.GetSuffix(); s != "" { + protocVersion += "-" + s + } + } + g.P("// \tprotoc-gen-go ", protocGenGoVersion) + g.P("// \tprotoc ", protocVersion) + } + + if f.Proto.GetOptions().GetDeprecated() { + g.P("// ", f.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", f.Desc.Path()) + } + g.P() +} + +func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) { + impFile, ok := gen.FilesByPath[imp.Path()] + if !ok { + return + } + if impFile.GoImportPath == f.GoImportPath { + // Don't generate imports or aliases for types in the same Go package. + return + } + // Generate imports for all non-weak dependencies, even if they are not + // referenced, because other code and tools depend on having the + // full transitive closure of protocol buffer types in the binary. + if !imp.IsWeak { + g.Import(impFile.GoImportPath) + } + if !imp.IsPublic { + return + } + + // Generate public imports by generating the imported file, parsing it, + // and extracting every symbol that should receive a forwarding declaration. + impGen := GenerateFile(gen, impFile) + impGen.Skip() + b, err := impGen.Content() + if err != nil { + gen.Error(err) + return + } + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments) + if err != nil { + gen.Error(err) + return + } + genForward := func(tok token.Token, name string, expr ast.Expr) { + // Don't import unexported symbols. + r, _ := utf8.DecodeRuneInString(name) + if !unicode.IsUpper(r) { + return + } + // Don't import the FileDescriptor. 
+ if name == impFile.GoDescriptorIdent.GoName { + return + } + // Don't import decls referencing a symbol defined in another package. + // i.e., don't import decls which are themselves public imports: + // + // type T = somepackage.T + if _, ok := expr.(*ast.SelectorExpr); ok { + return + } + g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name)) + } + g.P("// Symbols defined in public import of ", imp.Path(), ".") + g.P() + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + genForward(decl.Tok, spec.Name.Name, spec.Type) + case *ast.ValueSpec: + for i, name := range spec.Names { + var expr ast.Expr + if i < len(spec.Values) { + expr = spec.Values[i] + } + genForward(decl.Tok, name.Name, expr) + } + case *ast.ImportSpec: + default: + panic(fmt.Sprintf("can't generate forward for spec type %T", spec)) + } + } + } + } + g.P() +} + +func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + // Enum type declaration. + g.Annotate(e.GoIdent.GoName, e.Location) + leadingComments := appendDeprecationSuffix(e.Comments.Leading, + e.Desc.ParentFile(), + e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated()) + g.P(leadingComments, + "type ", e.GoIdent, " int32") + + // Enum value constants. + g.P("const (") + for _, value := range e.Values { + g.Annotate(value.GoIdent.GoName, value.Location) + leadingComments := appendDeprecationSuffix(value.Comments.Leading, + value.Desc.ParentFile(), + value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated()) + g.P(leadingComments, + value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(), + trailingComment(value.Comments.Trailing)) + } + g.P(")") + g.P() + + // Enum value maps. 
+ g.P("// Enum value maps for ", e.GoIdent, ".") + g.P("var (") + g.P(e.GoIdent.GoName+"_name", " = map[int32]string{") + for _, value := range e.Values { + duplicate := "" + if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) { + duplicate = "// Duplicate value: " + } + g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",") + } + g.P("}") + g.P(e.GoIdent.GoName+"_value", " = map[string]int32{") + for _, value := range e.Values { + g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",") + } + g.P("}") + g.P(")") + g.P() + + // Enum method. + // + // NOTE: A pointer value is needed to represent presence in proto2. + // Since a proto2 message can reference a proto3 enum, it is useful to + // always generate this method (even on proto3 enums) to support that case. + g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {") + g.P("p := new(", e.GoIdent, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + + // String method. + g.P("func (x ", e.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))") + g.P("}") + g.P() + + genEnumReflectMethods(g, f, e) + + // UnmarshalJSON method. + if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 { + g.P("// Deprecated: Do not use.") + g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {") + g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)") + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", e.GoIdent, "(num)") + g.P("return nil") + g.P("}") + g.P() + } + + // EnumDescriptor method. 
+ if e.genRawDescMethod { + var indexes []string + for i := 1; i < len(e.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i]))) + } + g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.") + g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + if m.Desc.IsMapEntry() { + return + } + + // Message type declaration. + g.Annotate(m.GoIdent.GoName, m.Location) + leadingComments := appendDeprecationSuffix(m.Comments.Leading, + m.Desc.ParentFile(), + m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated()) + g.P(leadingComments, + "type ", m.GoIdent, " struct {") + genMessageFields(g, f, m) + g.P("}") + g.P() + + genMessageKnownFunctions(g, f, m) + genMessageDefaultDecls(g, f, m) + genMessageMethods(g, f, m) + genMessageOneofWrapperTypes(g, f, m) +} + +func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + sf := f.allMessageFieldsByPtr[m] + genMessageInternalFields(g, f, m, sf) + for _, field := range m.Fields { + genMessageField(g, f, m, field, sf) + } +} + +func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) { + g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState")) + sf.append(genid.State_goname) + g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache")) + sf.append(genid.SizeCache_goname) + if m.hasWeak { + g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields")) + sf.append(genid.WeakFields_goname) + } + g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields")) + sf.append(genid.UnknownFields_goname) + if m.Desc.ExtensionRanges().Len() > 0 { + g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields")) + 
sf.append(genid.ExtensionFields_goname) + } + if sf.count > 0 { + g.P() + } +} + +func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) { + if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() { + // It would be a bit simpler to iterate over the oneofs below, + // but generating the field here keeps the contents of the Go + // struct in the same order as the contents of the source + // .proto file. + if oneof.Fields[0] != field { + return // only generate for first appearance + } + + tags := structTags{ + {"protobuf_oneof", string(oneof.Desc.Name())}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + + g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location) + leadingComments := oneof.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)} + for _, field := range oneof.Fields { + ss = append(ss, "\t*"+field.GoIdent.GoName+"\n") + } + leadingComments += protogen.Comments(strings.Join(ss, "")) + g.P(leadingComments, + oneof.GoName, " ", oneofInterfaceName(oneof), tags) + sf.append(oneof.GoName) + return + } + goType, pointer := fieldGoType(g, f, field) + if pointer { + goType = "*" + goType + } + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + {"json", fieldJSONTagValue(field)}, + } + if field.Desc.IsMap() { + key := field.Message.Fields[0] + val := field.Message.Fields[1] + tags = append(tags, structTags{ + {"protobuf_key", fieldProtobufTagValue(key)}, + {"protobuf_val", fieldProtobufTagValue(val)}, + }...) + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ } + + name := field.GoName + if field.Desc.IsWeak() { + name = genid.WeakFieldPrefix_goname + name + } + g.Annotate(m.GoIdent.GoName+"."+name, field.Location) + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.ParentFile(), + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + name, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + sf.append(field.GoName) +} + +// genMessageDefaultDecls generates consts and vars holding the default +// values of fields. +func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + var consts, vars []string + for _, field := range m.Fields { + if !field.Desc.HasDefault() { + continue + } + name := "Default_" + m.GoIdent.GoName + "_" + field.GoName + goType, _ := fieldGoType(g, f, field) + defVal := field.Desc.Default() + switch field.Desc.Kind() { + case protoreflect.StringKind: + consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String())) + case protoreflect.BytesKind: + vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes())) + case protoreflect.EnumKind: + idx := field.Desc.DefaultEnumValue().Index() + val := field.Enum.Values[idx] + if val.GoIdent.GoImportPath == f.GoImportPath { + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. 
+ consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s", + name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent))) + } + case protoreflect.FloatKind, protoreflect.DoubleKind: + if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { + var fn, arg string + switch f := defVal.Float(); { + case math.IsInf(f, -1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1" + case math.IsInf(f, +1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1" + case math.IsNaN(f): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), "" + } + vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg)) + } else { + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f)) + } + default: + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface())) + } + } + if len(consts) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("const (") + for _, s := range consts { + g.P(s) + } + g.P(")") + } + if len(vars) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("var (") + for _, s := range vars { + g.P(s) + } + g.P(")") + } + g.P() +} + +func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + genMessageBaseMethods(g, f, m) + genMessageGetterMethods(g, f, m) + genMessageSetterMethods(g, f, m) +} + +func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + // Reset method. + g.P("func (x *", m.GoIdent, ") Reset() {") + g.P("*x = ", m.GoIdent, "{}") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {") + g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("}") + g.P() + + // String method. 
+ g.P("func (x *", m.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)") + g.P("}") + g.P() + + // ProtoMessage method. + g.P("func (*", m.GoIdent, ") ProtoMessage() {}") + g.P() + + // ProtoReflect method. + genMessageReflectMethods(g, f, m) + + // Descriptor method. + if m.genRawDescMethod { + var indexes []string + for i := 1; i < len(m.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i]))) + } + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.") + g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + genNoInterfacePragma(g, m.isTracked) + + // Getter for parent oneof. + if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() { + g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location) + g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {") + g.P("if m != nil {") + g.P("return m.", oneof.GoName) + g.P("}") + g.P("return nil") + g.P("}") + g.P() + } + + // Getter for message field. 
+ goType, pointer := fieldGoType(g, f, field) + defaultValue := fieldDefaultValue(g, f, m, field) + g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.ParentFile(), + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + switch { + case field.Desc.IsWeak(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{") + g.P("var w ", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")") + g.P("}") + case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {") + g.P("return x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + default: + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + if !field.Desc.HasPresence() || defaultValue == "nil" { + g.P("if x != nil {") + } else { + g.P("if x != nil && x.", field.GoName, " != nil {") + } + star := "" + if pointer { + star = "*" + } + g.P("return ", star, " x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + } + g.P() + } +} + +func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + if !field.Desc.IsWeak() { + continue + } + + genNoInterfacePragma(g, m.isTracked) + + g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.ParentFile(), + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + 
g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {") + g.P("var w *", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = &x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)") + g.P("}") + g.P() + } +} + +// fieldGoType returns the Go type used for a field. +// +// If it returns pointer=true, the struct field is a pointer to the type. +func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) { + if field.Desc.IsWeak() { + return "struct{}", false + } + + pointer = field.Desc.HasPresence() + switch field.Desc.Kind() { + case protoreflect.BoolKind: + goType = "bool" + case protoreflect.EnumKind: + goType = g.QualifiedGoIdent(field.Enum.GoIdent) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + goType = "int32" + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + goType = "uint32" + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + goType = "int64" + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + goType = "uint64" + case protoreflect.FloatKind: + goType = "float32" + case protoreflect.DoubleKind: + goType = "float64" + case protoreflect.StringKind: + goType = "string" + case protoreflect.BytesKind: + goType = "[]byte" + pointer = false // rely on nullability of slices for presence + case protoreflect.MessageKind, protoreflect.GroupKind: + goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent) + pointer = false // pointer captured as part of the type + } + switch { + case field.Desc.IsList(): + return "[]" + goType, false + case field.Desc.IsMap(): + keyType, _ := fieldGoType(g, f, field.Message.Fields[0]) + valType, _ := fieldGoType(g, f, 
field.Message.Fields[1]) + return fmt.Sprintf("map[%v]%v", keyType, valType), false + } + return goType, pointer +} + +func fieldProtobufTagValue(field *protogen.Field) string { + var enumName string + if field.Desc.Kind() == protoreflect.EnumKind { + enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc) + } + return tag.Marshal(field.Desc, enumName) +} + +func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string { + if field.Desc.IsList() { + return "nil" + } + if field.Desc.HasDefault() { + defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName + if field.Desc.Kind() == protoreflect.BytesKind { + return "append([]byte(nil), " + defVarName + "...)" + } + return defVarName + } + switch field.Desc.Kind() { + case protoreflect.BoolKind: + return "false" + case protoreflect.StringKind: + return `""` + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: + return "nil" + case protoreflect.EnumKind: + val := field.Enum.Values[0] + if val.GoIdent.GoImportPath == f.GoImportPath { + return g.QualifiedGoIdent(val.GoIdent) + } else { + // If the enum value is declared in a different Go package, + // reference it by number since the name may not be correct. + // See https://github.com/golang/protobuf/issues/513. 
+ return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")" + } + default: + return "0" + } +} + +func fieldJSONTagValue(field *protogen.Field) string { + return string(field.Desc.Name()) + ",omitempty" +} + +func genExtensions(g *protogen.GeneratedFile, f *fileInfo) { + if len(f.allExtensions) == 0 { + return + } + + g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{") + for _, x := range f.allExtensions { + g.P("{") + g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),") + goType, pointer := fieldGoType(g, f, x.Extension) + if pointer { + goType = "*" + goType + } + g.P("ExtensionType: (", goType, ")(nil),") + g.P("Field: ", x.Desc.Number(), ",") + g.P("Name: ", strconv.Quote(string(x.Desc.FullName())), ",") + g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",") + g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",") + g.P("},") + } + g.P("}") + g.P() + + // Group extensions by the target message. 
+ var orderedTargets []protogen.GoIdent + allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo) + allExtensionsByPtr := make(map[*extensionInfo]int) + for i, x := range f.allExtensions { + target := x.Extendee.GoIdent + if len(allExtensionsByTarget[target]) == 0 { + orderedTargets = append(orderedTargets, target) + } + allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x) + allExtensionsByPtr[x] = i + } + for _, target := range orderedTargets { + g.P("// Extension fields to ", target, ".") + g.P("var (") + for _, x := range allExtensionsByTarget[target] { + xd := x.Desc + typeName := xd.Kind().String() + switch xd.Kind() { + case protoreflect.EnumKind: + typeName = string(xd.Enum().FullName()) + case protoreflect.MessageKind, protoreflect.GroupKind: + typeName = string(xd.Message().FullName()) + } + fieldName := string(xd.Name()) + + leadingComments := x.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n", + xd.Cardinality(), typeName, fieldName, xd.Number())) + leadingComments = appendDeprecationSuffix(leadingComments, + x.Desc.ParentFile(), + x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + "E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]", + trailingComment(x.Comments.Trailing)) + } + g.P(")") + g.P() + } +} + +// genMessageOneofWrapperTypes generates the oneof wrapper types and +// associates the types with the parent message type. 
+func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, oneof := range m.Oneofs { + if oneof.Desc.IsSynthetic() { + continue + } + ifName := oneofInterfaceName(oneof) + g.P("type ", ifName, " interface {") + g.P(ifName, "()") + g.P("}") + g.P() + for _, field := range oneof.Fields { + g.Annotate(field.GoIdent.GoName, field.Location) + g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location) + g.P("type ", field.GoIdent, " struct {") + goType, _ := fieldGoType(g, f, field) + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.ParentFile(), + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + field.GoName, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + g.P("}") + g.P() + } + for _, field := range oneof.Fields { + g.P("func (*", field.GoIdent, ") ", ifName, "() {}") + g.P() + } + } +} + +// oneofInterfaceName returns the name of the interface type implemented by +// the oneof field value types. +func oneofInterfaceName(oneof *protogen.Oneof) string { + return "is" + oneof.GoIdent.GoName +} + +// genNoInterfacePragma generates a standalone "nointerface" pragma to +// decorate methods with field-tracking support. +func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) { + if tracked { + g.P("//go:nointerface") + g.P() + } +} + +var gotrackTags = structTags{{"go", "track"}} + +// structTags is a data structure for build idiomatic Go struct tags. +// Each [2]string is a key-value pair, where value is the unescaped string. 
+// +// Example: structTags{{"key", "value"}}.String() -> `key:"value"` +type structTags [][2]string + +func (tags structTags) String() string { + if len(tags) == 0 { + return "" + } + var ss []string + for _, tag := range tags { + // NOTE: When quoting the value, we need to make sure the backtick + // character does not appear. Convert all cases to the escaped hex form. + key := tag[0] + val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1) + ss = append(ss, fmt.Sprintf("%s:%s", key, val)) + } + return "`" + strings.Join(ss, " ") + "`" +} + +// appendDeprecationSuffix optionally appends a deprecation notice as a suffix. +func appendDeprecationSuffix(prefix protogen.Comments, parentFile protoreflect.FileDescriptor, deprecated bool) protogen.Comments { + fileDeprecated := parentFile.Options().(*descriptorpb.FileOptions).GetDeprecated() + if !deprecated && !fileDeprecated { + return prefix + } + if prefix != "" { + prefix += "\n" + } + if fileDeprecated { + return prefix + " Deprecated: The entire proto file " + protogen.Comments(parentFile.Path()) + " is marked as deprecated.\n" + } + return prefix + " Deprecated: Marked as deprecated in " + protogen.Comments(parentFile.Path()) + ".\n" +} + +// trailingComment is like protogen.Comments, but lacks a trailing newline. +type trailingComment protogen.Comments + +func (c trailingComment) String() string { + s := strings.TrimSuffix(protogen.Comments(c).String(), "\n") + if strings.Contains(s, "\n") { + // We don't support multi-lined trailing comments as it is unclear + // how to best render them in the generated code. + return "" + } + return s +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go new file mode 100644 index 00000000..0048beb1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go @@ -0,0 +1,372 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protopath" + "google.golang.org/protobuf/reflect/protorange" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor")) + g.P() + + genFileDescriptor(gen, g, f) + if len(f.allEnums) > 0 { + g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")") + } + if len(f.allMessages) > 0 { + g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")") + } + + // Generate a unique list of Go types for all declarations and dependencies, + // and the associated index into the type list for all dependencies. 
+ var goTypes []string + var depIdxs []string + seen := map[protoreflect.FullName]int{} + genDep := func(name protoreflect.FullName, depSource string) { + if depSource != "" { + line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name) + depIdxs = append(depIdxs, line) + } + } + genEnum := func(e *protogen.Enum, depSource string) { + if e != nil { + name := e.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name) + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + genMessage := func(m *protogen.Message, depSource string) { + if m != nil { + name := m.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name) + if m.Desc.IsMapEntry() { + // Map entry messages have no associated Go type. + line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name) + } + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + + // This ordering is significant. + // See filetype.TypeBuilder.DependencyIndexes. 
+ type offsetEntry struct { + start int + name string + } + var depOffsets []offsetEntry + for _, enum := range f.allEnums { + genEnum(enum.Enum, "") + } + for _, message := range f.allMessages { + genMessage(message.Message, "") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"}) + for _, message := range f.allMessages { + for _, field := range message.Fields { + if field.Desc.IsWeak() { + continue + } + source := string(field.Desc.FullName()) + genEnum(field.Enum, source+":type_name") + genMessage(field.Message, source+":type_name") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genMessage(extension.Extendee, source+":extendee") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genEnum(extension.Enum, source+":type_name") + genMessage(extension.Message, source+":type_name") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Input, source+":input_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Output, source+":output_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""}) + for i := len(depOffsets) - 2; i >= 0; i-- { + curr, next := depOffsets[i], depOffsets[i+1] + depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s", + curr.start, curr.start, next.start, curr.name)) + } + if len(depIdxs) > math.MaxInt32 { + panic("too many dependencies") // sanity 
check + } + + g.P("var ", goTypesVarName(f), " = []interface{}{") + for _, s := range goTypes { + g.P(s) + } + g.P("}") + + g.P("var ", depIdxsVarName(f), " = []int32{") + for _, s := range depIdxs { + g.P(s) + } + g.P("}") + + g.P("func init() { ", initFuncName(f.File), "() }") + + g.P("func ", initFuncName(f.File), "() {") + g.P("if ", f.GoDescriptorIdent, " != nil {") + g.P("return") + g.P("}") + + // Ensure that initialization functions for different files in the same Go + // package run in the correct order: Call the init funcs for every .proto file + // imported by this one that is in the same Go package. + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + impFile := gen.FilesByPath[imps.Get(i).Path()] + if impFile.GoImportPath != f.GoImportPath { + continue + } + g.P(initFuncName(impFile), "()") + } + + if len(f.allMessages) > 0 { + // Populate MessageInfo.Exporters. + g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {") + for _, message := range f.allMessages { + if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {") + g.P("switch v := v.(*", message.GoIdent, "); i {") + for i := 0; i < sf.count; i++ { + if name := sf.unexported[i]; name != "" { + g.P("case ", i, ": return &v.", name) + } + } + g.P("default: return nil") + g.P("}") + g.P("}") + } + } + g.P("}") + + // Populate MessageInfo.OneofWrappers. + for _, message := range f.allMessages { + if len(message.Oneofs) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + // Associate the wrapper types by directly passing them to the MessageInfo. 
+ g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {") + for _, oneof := range message.Oneofs { + if !oneof.Desc.IsSynthetic() { + for _, field := range oneof.Fields { + g.P("(*", field.GoIdent, ")(nil),") + } + } + } + g.P("}") + } + } + } + + g.P("type x struct{}") + g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{") + g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{") + g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),") + g.P("RawDescriptor: ", rawDescVarName(f), ",") + g.P("NumEnums: ", len(f.allEnums), ",") + g.P("NumMessages: ", len(f.allMessages), ",") + g.P("NumExtensions: ", len(f.allExtensions), ",") + g.P("NumServices: ", len(f.Services), ",") + g.P("},") + g.P("GoTypes: ", goTypesVarName(f), ",") + g.P("DependencyIndexes: ", depIdxsVarName(f), ",") + if len(f.allEnums) > 0 { + g.P("EnumInfos: ", enumTypesVarName(f), ",") + } + if len(f.allMessages) > 0 { + g.P("MessageInfos: ", messageTypesVarName(f), ",") + } + if len(f.allExtensions) > 0 { + g.P("ExtensionInfos: ", extensionTypesVarName(f), ",") + } + g.P("}.Build()") + g.P(f.GoDescriptorIdent, " = out.File") + + // Set inputs to nil to allow GC to reclaim resources. 
+ g.P(rawDescVarName(f), " = nil") + g.P(goTypesVarName(f), " = nil") + g.P(depIdxsVarName(f), " = nil") + g.P("}") +} + +// stripSourceRetentionFieldsFromMessage walks the given message tree recursively +// and clears any fields with the field option: [retention = RETENTION_SOURCE] +func stripSourceRetentionFieldsFromMessage(m protoreflect.Message) { + protorange.Range(m, func(ppv protopath.Values) error { + m2, ok := ppv.Index(-1).Value.Interface().(protoreflect.Message) + if !ok { + return nil + } + m2.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + fdo, ok := fd.Options().(*descriptorpb.FieldOptions) + if ok && fdo.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE { + m2.Clear(fd) + } + return true + }) + return nil + }) +} + +func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto) + descProto.SourceCodeInfo = nil // drop source code information + stripSourceRetentionFieldsFromMessage(descProto.ProtoReflect()) + b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto) + if err != nil { + gen.Error(err) + return + } + + g.P("var ", rawDescVarName(f), " = []byte{") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") + g.P() + + if f.needRawDesc { + onceVar := rawDescVarName(f) + "Once" + dataVar := rawDescVarName(f) + "Data" + g.P("var (") + g.P(onceVar, " ", syncPackage.Ident("Once")) + g.P(dataVar, " = ", rawDescVarName(f)) + g.P(")") + g.P() + + g.P("func ", rawDescVarName(f), "GZIP() []byte {") + g.P(onceVar, ".Do(func() {") + g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")") + g.P("})") + g.P("return ", dataVar) + g.P("}") + g.P() + } +} + +func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + idx := 
f.allEnumsByPtr[e] + typesVar := enumTypesVarName(f) + + // Descriptor method. + g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {") + g.P("return ", typesVar, "[", idx, "].Descriptor()") + g.P("}") + g.P() + + // Type method. + g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {") + g.P("return &", typesVar, "[", idx, "]") + g.P("}") + g.P() + + // Number method. + g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {") + g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)") + g.P("}") + g.P() +} + +func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + idx := f.allMessagesByPtr[m] + typesVar := messageTypesVarName(f) + + // ProtoReflect method. + g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {") + g.P("mi := &", typesVar, "[", idx, "]") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("if ms.LoadMessageInfo() == nil {") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("return ms") + g.P("}") + g.P("return mi.MessageOf(x)") + g.P("}") + g.P() +} + +func fileVarName(f *protogen.File, suffix string) string { + prefix := f.GoDescriptorIdent.GoName + _, n := utf8.DecodeRuneInString(prefix) + prefix = strings.ToLower(prefix[:n]) + prefix[n:] + return prefix + "_" + suffix +} +func rawDescVarName(f *fileInfo) string { + return fileVarName(f.File, "rawDesc") +} +func goTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "goTypes") +} +func depIdxsVarName(f *fileInfo) string { + return fileVarName(f.File, "depIdxs") +} +func enumTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "enumTypes") +} +func messageTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "msgTypes") +} +func extensionTypesVarName(f 
*fileInfo) string { + return fileVarName(f.File, "extTypes") +} +func initFuncName(f *protogen.File) string { + return fileVarName(f, "init") +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go new file mode 100644 index 00000000..47c4fa18 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -0,0 +1,1079 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "strings" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/genid" +) + +// Specialized support for well-known types are hard-coded into the generator +// as opposed to being injected in adjacent .go sources in the generated package +// in order to support specialized build systems like Bazel that always generate +// dynamically from the source .proto files. + +func genPackageKnownComment(f *fileInfo) protogen.Comments { + switch f.Desc.Path() { + case genid.File_google_protobuf_any_proto: + return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `. + + The Any message is a dynamic representation of any other message value. + It is functionally a tuple of the full name of the remote message type and + the serialized bytes of the remote message value. + + + Constructing an Any + + An Any message containing another message value is constructed using New: + + any, err := anypb.New(m) + if err != nil { + ... // handle error + } + ... // make use of any + + + Unmarshaling an Any + + With a populated Any message, the underlying message can be serialized into + a remote concrete message value in a few ways. 
+ + If the exact concrete type is known, then a new (or pre-existing) instance + of that message can be passed to the UnmarshalTo method: + + m := new(foopb.MyMessage) + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... // make use of m + + If the exact concrete type is not known, then the UnmarshalNew method can be + used to unmarshal the contents into a new instance of the remote message type: + + m, err := any.UnmarshalNew() + if err != nil { + ... // handle error + } + ... // make use of m + + UnmarshalNew uses the global type registry to resolve the message type and + construct a new instance of that message to unmarshal into. In order for a + message type to appear in the global registry, the Go type representing that + protobuf message type must be linked into the Go binary. For messages + generated by protoc-gen-go, this is achieved through an import of the + generated Go package representing a .proto file. + + A common pattern with UnmarshalNew is to use a type switch with the resulting + proto.Message value: + + switch m := m.(type) { + case *foopb.MyMessage: + ... // make use of m as a *foopb.MyMessage + case *barpb.OtherMessage: + ... // make use of m as a *barpb.OtherMessage + case *bazpb.SomeMessage: + ... // make use of m as a *bazpb.SomeMessage + } + + This pattern ensures that the generated packages containing the message types + listed in the case clauses are linked into the Go binary and therefore also + registered in the global registry. + + + Type checking an Any + + In order to type check whether an Any message represents some other message, + then use the MessageIs method: + + if any.MessageIs((*foopb.MyMessage)(nil)) { + ... 
// make use of any, knowing that it contains a foopb.MyMessage + } + + The MessageIs method can also be used with an allocated instance of the target + message type if the intention is to unmarshal into it if the type matches: + + m := new(foopb.MyMessage) + if any.MessageIs(m) { + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... // make use of m + } + +` + case genid.File_google_protobuf_timestamp_proto: + return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `. + + The Timestamp message represents a timestamp, + an instant in time since the Unix epoch (January 1st, 1970). + + + Conversion to a Go Time + + The AsTime method can be used to convert a Timestamp message to a + standard Go time.Time value in UTC: + + t := ts.AsTime() + ... // make use of t as a time.Time + + Converting to a time.Time is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsTime method performs the conversion on a best-effort basis. Timestamps + with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) + are normalized during the conversion to a time.Time. To manually check for + invalid Timestamps per the documented limitations in timestamp.proto, + additionally call the CheckValid method: + + if err := ts.CheckValid(); err != nil { + ... // handle error + } + + + Conversion from a Go Time + + The timestamppb.New function can be used to construct a Timestamp message + from a standard Go time.Time value: + + ts := timestamppb.New(t) + ... // make use of ts as a *timestamppb.Timestamp + + In order to construct a Timestamp representing the current time, use Now: + + ts := timestamppb.Now() + ... 
// make use of ts as a *timestamppb.Timestamp + +` + case genid.File_google_protobuf_duration_proto: + return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `. + + The Duration message represents a signed span of time. + + + Conversion to a Go Duration + + The AsDuration method can be used to convert a Duration message to a + standard Go time.Duration value: + + d := dur.AsDuration() + ... // make use of d as a time.Duration + + Converting to a time.Duration is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsDuration method performs the conversion on a best-effort basis. + Durations with denormal values (e.g., nanoseconds beyond -99999999 and + +99999999, inclusive; or seconds and nanoseconds with opposite signs) + are normalized during the conversion to a time.Duration. To manually check for + invalid Duration per the documented limitations in duration.proto, + additionally call the CheckValid method: + + if err := dur.CheckValid(); err != nil { + ... // handle error + } + + Note that the documented limitations in duration.proto does not protect a + Duration from overflowing the representable range of a time.Duration in Go. + The AsDuration method uses saturation arithmetic such that an overflow clamps + the resulting value to the closest representable value (e.g., math.MaxInt64 + for positive overflow and math.MinInt64 for negative overflow). + + + Conversion from a Go Duration + + The durationpb.New function can be used to construct a Duration message + from a standard Go time.Duration value: + + dur := durationpb.New(d) + ... // make use of d as a *durationpb.Duration + +` + case genid.File_google_protobuf_struct_proto: + return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `. 
+ + The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are + used to represent arbitrary JSON. The Value message represents a JSON value, + the Struct message represents a JSON object, and the ListValue message + represents a JSON array. See https://json.org for more information. + + The Value, Struct, and ListValue types have generated MarshalJSON and + UnmarshalJSON methods such that they serialize JSON equivalent to what the + messages themselves represent. Use of these types with the + "google.golang.org/protobuf/encoding/protojson" package + ensures that they will be serialized as their JSON equivalent. + + # Conversion to and from a Go interface + + The standard Go "encoding/json" package has functionality to serialize + arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and + ListValue.AsSlice methods can convert the protobuf message representation into + a form represented by interface{}, map[string]interface{}, and []interface{}. + This form can be used with other packages that operate on such data structures + and also directly with the standard json package. + + In order to convert the interface{}, map[string]interface{}, and []interface{} + forms back as Value, Struct, and ListValue messages, use the NewStruct, + NewList, and NewValue constructor functions. 
+ + # Example usage + + Consider the following example JSON object: + + { + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [ + { + "type": "home", + "number": "212 555-1234" + }, + { + "type": "office", + "number": "646 555-4567" + } + ], + "children": [], + "spouse": null + } + + To construct a Value message representing the above JSON object: + + m, err := structpb.NewValue(map[string]interface{}{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": map[string]interface{}{ + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100", + }, + "phoneNumbers": []interface{}{ + map[string]interface{}{ + "type": "home", + "number": "212 555-1234", + }, + map[string]interface{}{ + "type": "office", + "number": "646 555-4567", + }, + }, + "children": []interface{}{}, + "spouse": nil, + }) + if err != nil { + ... // handle error + } + ... // make use of m as a *structpb.Value +` + case genid.File_google_protobuf_field_mask_proto: + return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `. + + The FieldMask message represents a set of symbolic field paths. + The paths are specific to some target message type, + which is not stored within the FieldMask message itself. + + + Constructing a FieldMask + + The New function is used construct a FieldMask: + + var messageType *descriptorpb.DescriptorProto + fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") + if err != nil { + ... // handle error + } + ... // make use of fm + + The "field.name" and "field.number" paths are valid paths according to the + google.protobuf.DescriptorProto message. Use of a path that does not correlate + to valid fields reachable from DescriptorProto would result in an error. 
+ + Once a FieldMask message has been constructed, + the Append method can be used to insert additional paths to the path set: + + var messageType *descriptorpb.DescriptorProto + if err := fm.Append(messageType, "options"); err != nil { + ... // handle error + } + + + Type checking a FieldMask + + In order to verify that a FieldMask represents a set of fields that are + reachable from some target message type, use the IsValid method: + + var messageType *descriptorpb.DescriptorProto + if fm.IsValid(messageType) { + ... // make use of fm + } + + IsValid needs to be passed the target message type as an input since the + FieldMask message itself does not store the message type that the set of paths + are for. +` + default: + return "" + } +} + +func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + switch m.Desc.FullName() { + case genid.Any_message_fullname: + g.P("// New marshals src into a new Any instance.") + g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {") + g.P(" dst := new(Any)") + g.P(" if err := dst.MarshalFrom(src); err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return dst, nil") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals src into dst as the underlying message") + g.P("// using the provided marshal options.") + g.P("//") + g.P("// If no options are specified, call dst.MarshalFrom instead.") + g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {") + g.P(" const urlPrefix = \"type.googleapis.com/\"") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" b, err := opts.Marshal(src)") + g.P(" if err != nil {") + g.P(" return err") + g.P(" }") + g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())") + g.P(" dst.Value = b") + g.P(" return nil") + g.P("}") + g.P() + + g.P("// UnmarshalTo 
unmarshals the underlying message from src into dst") + g.P("// using the provided unmarshal options.") + g.P("// It reports an error if dst is not of the right message type.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalTo instead.") + g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" if !src.MessageIs(dst) {") + g.P(" got := dst.ProtoReflect().Descriptor().FullName()") + g.P(" want := src.MessageName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)") + g.P(" }") + g.P(" return opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the underlying message from src into dst,") + g.P("// which is newly created message using a type resolved from the type URL.") + g.P("// The message type is resolved according to opt.Resolver,") + g.P("// which should implement protoregistry.MessageTypeResolver.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalNew instead.") + g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {") + g.P(" if src.GetTypeUrl() == \"\" {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")") + g.P(" }") + g.P(" if opts.Resolver == nil {") + g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes")) + g.P(" }") + g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")") + g.P(" if !ok {") + g.P(" return nil, ", protoregistryPackage.Ident("NotFound")) + g.P(" }") + g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())") + g.P(" if err != 
nil {") + g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {") + g.P(" return nil, err") + g.P(" }") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)") + g.P(" }") + g.P(" dst = mt.New().Interface()") + g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// MessageIs reports whether the underlying message is of the same type as m.") + g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" if m == nil {") + g.P(" return false") + g.P(" }") + g.P(" url := x.GetTypeUrl()") + g.P(" name := string(m.ProtoReflect().Descriptor().FullName())") + g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {") + g.P(" return false") + g.P(" }") + g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'") + g.P("}") + g.P() + + g.P("// MessageName reports the full name of the underlying message,") + g.P("// returning an empty string if invalid.") + g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {") + g.P(" url := x.GetTypeUrl()") + g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)") + g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {") + g.P(" name = name[i+len(\"/\"):]") + g.P(" }") + g.P(" if !name.IsValid() {") + g.P(" return \"\"") + g.P(" }") + g.P(" return name") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals m into x as the underlying message.") + g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.") + g.P("// It resets m before performing the unmarshal operation.") + g.P("// It reports an error if m is not of the right message type.") + g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return 
UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into") + g.P("// a newly allocated message of the specified type.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {") + g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + case genid.Timestamp_message_fullname: + g.P("// Now constructs a new Timestamp from the current time.") + g.P("func Now() *Timestamp {") + g.P(" return New(", timePackage.Ident("Now"), "())") + g.P("}") + g.P() + + g.P("// New constructs a new Timestamp from the provided time.Time.") + g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {") + g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}") + g.P("}") + g.P() + + g.P("// AsTime converts x to a time.Time.") + g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {") + g.P(" return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()") + g.P("}") + g.P() + + g.P("// IsValid reports whether the timestamp is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Timestamp) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the timestamp is invalid.") + g.P("// In particular, it checks whether the value represents a date that is") + g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.") + g.P("// An error is reported for a nil Timestamp.") + g.P("func (x *Timestamp) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp 
(%v) before 0001-01-01\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)") + g.P(" case invalidNanos:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanos") + g.P(")") + g.P() + + g.P("func (x *Timestamp) check() uint {") + g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive") + g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < minTimestamp:") + g.P(" return invalidUnderflow") + g.P(" case secs > maxTimestamp:") + g.P(" return invalidOverflow") + g.P(" case nanos < 0 || nanos >= 1e9:") + g.P(" return invalidNanos") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Duration_message_fullname: + g.P("// New constructs a new Duration from the provided time.Duration.") + g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {") + g.P(" nanos := d.Nanoseconds()") + g.P(" secs := nanos / 1e9") + g.P(" nanos -= secs * 1e9") + g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}") + g.P("}") + g.P() + + g.P("// AsDuration converts x to a time.Duration,") + g.P("// returning the closest duration value in the event of overflow.") + g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second")) + g.P(" overflow := d/", 
timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)") + g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond")) + g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)") + g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)") + g.P(" if overflow {") + g.P(" switch {") + g.P(" case secs < 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")") + g.P(" case secs > 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")") + g.P(" }") + g.P(" }") + g.P(" return d") + g.P("}") + g.P() + + g.P("// IsValid reports whether the duration is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Duration) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the duration is invalid.") + g.P("// In particular, it checks whether the value is within the range of") + g.P("// -10000 years to +10000 years inclusive.") + g.P("// An error is reported for a nil Duration.") + g.P("func (x *Duration) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds -10000 years\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)") + g.P(" case invalidNanosRange:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)") + g.P(" case invalidNanosSign:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" 
invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanosRange") + g.P(" invalidNanosSign") + g.P(")") + g.P() + + g.P("func (x *Duration) check() uint {") + g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < -absDuration:") + g.P(" return invalidUnderflow") + g.P(" case secs > +absDuration:") + g.P(" return invalidOverflow") + g.P(" case nanos <= -1e9 || nanos >= +1e9:") + g.P(" return invalidNanosRange") + g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):") + g.P(" return invalidNanosSign") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Struct_message_fullname: + g.P("// NewStruct constructs a Struct from a general-purpose Go map.") + g.P("// The map keys must be valid UTF-8.") + g.P("// The map values are converted using NewValue.") + g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") + g.P(" for k, v := range v {") + g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)") + g.P(" }") + g.P(" var err error") + g.P(" x.Fields[k], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsMap converts x to a general-purpose Go map.") + g.P("// The map values are converted by calling Value.AsInterface.") + g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P(" f := x.GetFields()") + g.P(" vs := make(map[string]interface{}, len(f))") + g.P(" for k, v := range f {") + g.P(" vs[k] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *Struct) MarshalJSON() ([]byte, error) {") + g.P(" return ", 
protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Struct) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.ListValue_message_fullname: + g.P("// NewList constructs a ListValue from a general-purpose Go slice.") + g.P("// The slice elements are converted using NewValue.") + g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P(" x := &ListValue{Values: make([]*Value, len(v))}") + g.P(" for i, v := range v {") + g.P(" var err error") + g.P(" x.Values[i], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsSlice converts x to a general-purpose Go slice.") + g.P("// The slice elements are converted by calling Value.AsInterface.") + g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P(" vals := x.GetValues()") + g.P(" vs := make([]interface{}, len(vals))") + g.P(" for i, v := range vals {") + g.P(" vs[i] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.Value_message_fullname: + g.P("// NewValue constructs a Value from a general-purpose Go interface.") + g.P("//") + g.P("// ╔════════════════════════╤════════════════════════════════════════════╗") + g.P("// ║ Go type │ Conversion ║") + g.P("// ╠════════════════════════╪════════════════════════════════════════════╣") + g.P("// ║ nil │ stored as NullValue ║") + g.P("// ║ bool │ stored as BoolValue ║") + g.P("// ║ int, int32, int64 │ stored as NumberValue ║") + g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║") + g.P("// ║ float32, float64 │ stored as NumberValue ║") + 
g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") + g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") + g.P("// ║ map[string]interface{} │ stored as StructValue ║") + g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") + g.P("//") + g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss") + g.P("// is possible since they are stored as a float64.") + g.P("func NewValue(v interface{}) (*Value, error) {") + g.P(" switch v := v.(type) {") + g.P(" case nil:") + g.P(" return NewNullValue(), nil") + g.P(" case bool:") + g.P(" return NewBoolValue(v), nil") + g.P(" case int:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case string:") + g.P(" if !", utf8Package.Ident("ValidString"), "(v) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)") + g.P(" }") + g.P(" return NewStringValue(v), nil") + g.P(" case []byte:") + g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") + g.P(" return NewStringValue(s), nil") + g.P(" case map[string]interface{}:") + g.P(" v2, err := NewStruct(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewStructValue(v2), nil") + g.P(" case []interface{}:") + g.P(" v2, err := NewList(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewListValue(v2), nil") + g.P(" default:") + 
g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)") + g.P(" }") + g.P("}") + g.P() + + g.P("// NewNullValue constructs a new null Value.") + g.P("func NewNullValue() *Value {") + g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}") + g.P("}") + g.P() + + g.P("// NewBoolValue constructs a new boolean Value.") + g.P("func NewBoolValue(v bool) *Value {") + g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}") + g.P("}") + g.P() + + g.P("// NewNumberValue constructs a new number Value.") + g.P("func NewNumberValue(v float64) *Value {") + g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}") + g.P("}") + g.P() + + g.P("// NewStringValue constructs a new string Value.") + g.P("func NewStringValue(v string) *Value {") + g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}") + g.P("}") + g.P() + + g.P("// NewStructValue constructs a new struct Value.") + g.P("func NewStructValue(v *Struct) *Value {") + g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}") + g.P("}") + g.P() + + g.P("// NewListValue constructs a new list Value.") + g.P("func NewListValue(v *ListValue) *Value {") + g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}") + g.P("}") + g.P() + + g.P("// AsInterface converts x to a general-purpose Go interface.") + g.P("//") + g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce") + g.P("// semantically equivalent JSON (assuming no errors occur).") + g.P("//") + g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") + g.P("// converted as strings to remain compatible with MarshalJSON.") + g.P("func (x *Value) AsInterface() interface{} {") + g.P(" switch v := x.GetKind().(type) {") + g.P(" case *Value_NumberValue:") + g.P(" if v != nil {") + g.P(" switch {") + g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):") + g.P(" return \"NaN\"") + g.P(" case ", mathPackage.Ident("IsInf"), 
"(v.NumberValue, +1):") + g.P(" return \"Infinity\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):") + g.P(" return \"-Infinity\"") + g.P(" default:") + g.P(" return v.NumberValue") + g.P(" }") + g.P(" }") + g.P(" case *Value_StringValue:") + g.P(" if v != nil {") + g.P(" return v.StringValue") + g.P(" }") + g.P(" case *Value_BoolValue:") + g.P(" if v != nil {") + g.P(" return v.BoolValue") + g.P(" }") + g.P(" case *Value_StructValue:") + g.P(" if v != nil {") + g.P(" return v.StructValue.AsMap()") + g.P(" }") + g.P(" case *Value_ListValue:") + g.P(" if v != nil {") + g.P(" return v.ListValue.AsSlice()") + g.P(" }") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func (x *Value) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Value) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.FieldMask_message_fullname: + g.P("// New constructs a field mask from a list of paths and verifies that") + g.P("// each one is valid according to the specified message type.") + g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {") + g.P(" x := new(FieldMask)") + g.P(" return x, x.Append(m, paths...)") + g.P("}") + g.P() + + g.P("// Union returns the union of all the paths in the input field masks.") + g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var out []string") + g.P(" out = append(out, mx.GetPaths()...)") + g.P(" out = append(out, my.GetPaths()...)") + g.P(" for _, m := range ms {") + g.P(" out = append(out, m.GetPaths()...)") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// Intersect returns the intersection of all the paths in the input field masks.") + g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var ss1, 
ss2 []string // reused buffers for performance") + g.P(" intersect := func(out, in []string) []string {") + g.P(" ss1 = normalizePaths(append(ss1[:0], in...))") + g.P(" ss2 = normalizePaths(append(ss2[:0], out...))") + g.P(" out = out[:0]") + g.P(" for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {") + g.P(" switch s1, s2 := ss1[i1], ss2[i2]; {") + g.P(" case hasPathPrefix(s1, s2):") + g.P(" out = append(out, s1)") + g.P(" i1++") + g.P(" case hasPathPrefix(s2, s1):") + g.P(" out = append(out, s2)") + g.P(" i2++") + g.P(" case lessPath(s1, s2):") + g.P(" i1++") + g.P(" case lessPath(s2, s1):") + g.P(" i2++") + g.P(" }") + g.P(" }") + g.P(" return out") + g.P(" }") + g.P() + g.P(" out := Union(mx, my, ms...).GetPaths()") + g.P(" out = intersect(out, mx.GetPaths())") + g.P(" out = intersect(out, my.GetPaths())") + g.P(" for _, m := range ms {") + g.P(" out = intersect(out, m.GetPaths())") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// IsValid reports whether all the paths are syntactically valid and") + g.P("// refer to known fields in the specified message type.") + g.P("// It reports false for a nil FieldMask.") + g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" paths := x.GetPaths()") + g.P(" return x != nil && numValidPaths(m, paths) == len(paths)") + g.P("}") + g.P() + + g.P("// Append appends a list of paths to the mask and verifies that each one") + g.P("// is valid according to the specified message type.") + g.P("// An invalid path is not appended and breaks insertion of subsequent paths.") + g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {") + g.P(" numValid := numValidPaths(m, paths)") + g.P(" x.Paths = append(x.Paths, paths[:numValid]...)") + g.P(" paths = paths[numValid:]") + g.P(" if len(paths) > 0 {") + g.P(" name := m.ProtoReflect().Descriptor().FullName()") + g.P(" return ", protoimplPackage.Ident("X"), 
".NewError(\"invalid path %q for message %q\", paths[0], name)") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {") + g.P(" md0 := m.ProtoReflect().Descriptor()") + g.P(" for i, path := range paths {") + g.P(" md := md0") + g.P(" if !rangeFields(path, func(field string) bool {") + g.P(" // Search the field within the message.") + g.P(" if md == nil {") + g.P(" return false // not within a message") + g.P(" }") + g.P(" fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))") + g.P(" // The real field name of a group is the message name.") + g.P(" if fd == nil {") + g.P(" gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))") + g.P(" if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(gd.Message().Name()) == field {") + g.P(" fd = gd") + g.P(" }") + g.P(" } else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {") + g.P(" fd = nil") + g.P(" }") + g.P(" if fd == nil {") + g.P(" return false // message has does not have this field") + g.P(" }") + g.P() + g.P(" // Identify the next message to search within.") + g.P(" md = fd.Message() // may be nil") + g.P() + g.P(" // Repeated fields are only allowed at the last position.") + g.P(" if fd.IsList() || fd.IsMap() {") + g.P(" md = nil") + g.P(" }") + g.P() + g.P(" return true") + g.P(" }) {") + g.P(" return i") + g.P(" }") + g.P(" }") + g.P(" return len(paths)") + g.P("}") + g.P() + + g.P("// Normalize converts the mask to its canonical form where all paths are sorted") + g.P("// and redundant paths are removed.") + g.P("func (x *FieldMask) Normalize() {") + g.P(" x.Paths = normalizePaths(x.Paths)") + g.P("}") + g.P() + g.P("func normalizePaths(paths []string) []string {") + g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {") + g.P(" return 
lessPath(paths[i], paths[j])") + g.P(" })") + g.P() + g.P(" // Elide any path that is a prefix match on the previous.") + g.P(" out := paths[:0]") + g.P(" for _, path := range paths {") + g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {") + g.P(" continue") + g.P(" }") + g.P(" out = append(out, path)") + g.P(" }") + g.P(" return out") + g.P("}") + g.P() + + g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either") + g.P("// an exact matche or that the prefix is delimited by a dot.") + g.P("func hasPathPrefix(path, prefix string) bool {") + g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')") + g.P("}") + g.P() + + g.P("// lessPath is a lexicographical comparison where dot is specially treated") + g.P("// as the smallest symbol.") + g.P("func lessPath(x, y string) bool {") + g.P(" for i := 0; i < len(x) && i < len(y); i++ {") + g.P(" if x[i] != y[i] {") + g.P(" return (x[i] - '.') < (y[i] - '.')") + g.P(" }") + g.P(" }") + g.P(" return len(x) < len(y)") + g.P("}") + g.P() + + g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by") + g.P("// iterating over each field in place and calling a iterator function.") + g.P("func rangeFields(path string, f func(field string) bool) bool {") + g.P(" for {") + g.P(" var field string") + g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {") + g.P(" field, path = path[:i], path[i:]") + g.P(" } else {") + g.P(" field, path = path, \"\"") + g.P(" }") + g.P() + g.P(" if !f(field) {") + g.P(" return false") + g.P(" }") + g.P() + g.P(" if len(path) == 0 {") + g.P(" return true") + g.P(" }") + g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")") + g.P(" }") + g.P("}") + g.P() + + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + 
genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value") + typeName := strings.ToLower(funcName) + switch typeName { + case "float": + typeName = "float32" + case "double": + typeName = "float64" + case "bytes": + typeName = "[]byte" + } + + g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.") + g.P("func ", funcName, "(v ", typeName, ") *", m.GoIdent, " {") + g.P(" return &", m.GoIdent, "{Value: v}") + g.P("}") + g.P() + } +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go new file mode 100644 index 00000000..e67236d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoc-gen-go binary is a protoc plugin to generate Go code for +// both proto2 and proto3 versions of the protocol buffer language. +// +// For more information about the usage of this plugin, see: +// https://protobuf.dev/reference/go/go-generated. 
+package main + +import ( + "errors" + "flag" + "fmt" + "os" + "path/filepath" + + gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo" + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/version" +) + +const genGoDocURL = "https://protobuf.dev/reference/go/go-generated" +const grpcDocURL = "https://grpc.io/docs/languages/go/quickstart/#regenerate-grpc-code" + +func main() { + if len(os.Args) == 2 && os.Args[1] == "--version" { + fmt.Fprintf(os.Stdout, "%v %v\n", filepath.Base(os.Args[0]), version.String()) + os.Exit(0) + } + if len(os.Args) == 2 && os.Args[1] == "--help" { + fmt.Fprintf(os.Stdout, "See "+genGoDocURL+" for usage information.\n") + os.Exit(0) + } + + var ( + flags flag.FlagSet + plugins = flags.String("plugins", "", "deprecated option") + ) + protogen.Options{ + ParamFunc: flags.Set, + }.Run(func(gen *protogen.Plugin) error { + if *plugins != "" { + return errors.New("protoc-gen-go: plugins are not supported; use 'protoc --go-grpc_out=...' to generate gRPC\n\n" + + "See " + grpcDocURL + " for more information.") + } + for _, f := range gen.Files { + if f.Generate { + gengo.GenerateFile(gen, f) + } + } + gen.SupportedFeatures = gengo.SupportedFeatures + return nil + }) +} diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go new file mode 100644 index 00000000..2d2171e5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -0,0 +1,1357 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protogen provides support for writing protoc plugins. +// +// Plugins for protoc, the Protocol Buffer compiler, +// are programs which read a CodeGeneratorRequest message from standard input +// and write a CodeGeneratorResponse message to standard output. 
+// This package provides support for writing plugins which generate Go code. +package protogen + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" + "google.golang.org/protobuf/types/pluginpb" +) + +const goPackageDocURL = "https://protobuf.dev/reference/go/go-generated#package" + +// Run executes a function as a protoc plugin. +// +// It reads a CodeGeneratorRequest message from os.Stdin, invokes the plugin +// function, and writes a CodeGeneratorResponse message to os.Stdout. +// +// If a failure occurs while reading or writing, Run prints an error to +// os.Stderr and calls os.Exit(1). +func (opts Options) Run(f func(*Plugin) error) { + if err := run(opts, f); err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), err) + os.Exit(1) + } +} + +func run(opts Options, f func(*Plugin) error) error { + if len(os.Args) > 1 { + return fmt.Errorf("unknown argument %q (this program should be run by protoc, not directly)", os.Args[1]) + } + in, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + req := &pluginpb.CodeGeneratorRequest{} + if err := proto.Unmarshal(in, req); err != nil { + return err + } + gen, err := opts.New(req) + if err != nil { + return err + } + if err := f(gen); err != nil { + // Errors from the plugin function are reported by setting the + // error field in the CodeGeneratorResponse. 
+ // + // In contrast, errors that indicate a problem in protoc + // itself (unparsable input, I/O errors, etc.) are reported + // to stderr. + gen.Error(err) + } + resp := gen.Response() + out, err := proto.Marshal(resp) + if err != nil { + return err + } + if _, err := os.Stdout.Write(out); err != nil { + return err + } + return nil +} + +// A Plugin is a protoc plugin invocation. +type Plugin struct { + // Request is the CodeGeneratorRequest provided by protoc. + Request *pluginpb.CodeGeneratorRequest + + // Files is the set of files to generate and everything they import. + // Files appear in topological order, so each file appears before any + // file that imports it. + Files []*File + FilesByPath map[string]*File + + // SupportedFeatures is the set of protobuf language features supported by + // this generator plugin. See the documentation for + // google.protobuf.CodeGeneratorResponse.supported_features for details. + SupportedFeatures uint64 + + fileReg *protoregistry.Files + enumsByName map[protoreflect.FullName]*Enum + messagesByName map[protoreflect.FullName]*Message + annotateCode bool + pathType pathType + module string + genFiles []*GeneratedFile + opts Options + err error +} + +type Options struct { + // If ParamFunc is non-nil, it will be called with each unknown + // generator parameter. + // + // Plugins for protoc can accept parameters from the command line, + // passed in the --_out protoc, separated from the output + // directory with a colon; e.g., + // + // --go_out==,=: + // + // Parameters passed in this fashion as a comma-separated list of + // key=value pairs will be passed to the ParamFunc. 
+ // + // The (flag.FlagSet).Set method matches this function signature, + // so parameters can be converted into flags as in the following: + // + // var flags flag.FlagSet + // value := flags.Bool("param", false, "") + // opts := &protogen.Options{ + // ParamFunc: flags.Set, + // } + // protogen.Run(opts, func(p *protogen.Plugin) error { + // if *value { ... } + // }) + ParamFunc func(name, value string) error + + // ImportRewriteFunc is called with the import path of each package + // imported by a generated file. It returns the import path to use + // for this package. + ImportRewriteFunc func(GoImportPath) GoImportPath +} + +// New returns a new Plugin. +func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) { + gen := &Plugin{ + Request: req, + FilesByPath: make(map[string]*File), + fileReg: new(protoregistry.Files), + enumsByName: make(map[protoreflect.FullName]*Enum), + messagesByName: make(map[protoreflect.FullName]*Message), + opts: opts, + } + + packageNames := make(map[string]GoPackageName) // filename -> package name + importPaths := make(map[string]GoImportPath) // filename -> import path + for _, param := range strings.Split(req.GetParameter(), ",") { + var value string + if i := strings.Index(param, "="); i >= 0 { + value = param[i+1:] + param = param[0:i] + } + switch param { + case "": + // Ignore. 
+ case "module": + gen.module = value + case "paths": + switch value { + case "import": + gen.pathType = pathTypeImport + case "source_relative": + gen.pathType = pathTypeSourceRelative + default: + return nil, fmt.Errorf(`unknown path type %q: want "import" or "source_relative"`, value) + } + case "annotate_code": + switch value { + case "true", "": + gen.annotateCode = true + case "false": + default: + return nil, fmt.Errorf(`bad value for parameter %q: want "true" or "false"`, param) + } + default: + if param[0] == 'M' { + impPath, pkgName := splitImportPathAndPackageName(value) + if pkgName != "" { + packageNames[param[1:]] = pkgName + } + if impPath != "" { + importPaths[param[1:]] = impPath + } + continue + } + if opts.ParamFunc != nil { + if err := opts.ParamFunc(param, value); err != nil { + return nil, err + } + } + } + } + + // When the module= option is provided, we strip the module name + // prefix from generated files. This only makes sense if generated + // filenames are based on the import path. + if gen.module != "" && gen.pathType == pathTypeSourceRelative { + return nil, fmt.Errorf("cannot use module= with paths=source_relative") + } + + // Figure out the import path and package name for each file. + // + // The rules here are complicated and have grown organically over time. + // Interactions between different ways of specifying package information + // may be surprising. + // + // The recommended approach is to include a go_package option in every + // .proto source file specifying the full import path of the Go package + // associated with this file. + // + // option go_package = "google.golang.org/protobuf/types/known/anypb"; + // + // Alternatively, build systems which want to exert full control over + // import paths may specify M= flags. + for _, fdesc := range gen.Request.ProtoFile { + // The "M" command-line flags take precedence over + // the "go_package" option in the .proto source file. 
+ filename := fdesc.GetName() + impPath, pkgName := splitImportPathAndPackageName(fdesc.GetOptions().GetGoPackage()) + if importPaths[filename] == "" && impPath != "" { + importPaths[filename] = impPath + } + if packageNames[filename] == "" && pkgName != "" { + packageNames[filename] = pkgName + } + switch { + case importPaths[filename] == "": + // The import path must be specified one way or another. + return nil, fmt.Errorf( + "unable to determine Go import path for %q\n\n"+ + "Please specify either:\n"+ + "\t• a \"go_package\" option in the .proto source file, or\n"+ + "\t• a \"M\" argument on the command line.\n\n"+ + "See %v for more information.\n", + fdesc.GetName(), goPackageDocURL) + case !strings.Contains(string(importPaths[filename]), ".") && + !strings.Contains(string(importPaths[filename]), "/"): + // Check that import paths contain at least a dot or slash to avoid + // a common mistake where import path is confused with package name. + return nil, fmt.Errorf( + "invalid Go import path %q for %q\n\n"+ + "The import path must contain at least one period ('.') or forward slash ('/') character.\n\n"+ + "See %v for more information.\n", + string(importPaths[filename]), fdesc.GetName(), goPackageDocURL) + case packageNames[filename] == "": + // If the package name is not explicitly specified, + // then derive a reasonable package name from the import path. + // + // NOTE: The package name is derived first from the import path in + // the "go_package" option (if present) before trying the "M" flag. + // The inverted order for this is because the primary use of the "M" + // flag is by build systems that have full control over the + // import paths all packages, where it is generally expected that + // the Go package name still be identical for the Go toolchain and + // for custom build systems like Bazel. 
+ if impPath == "" { + impPath = importPaths[filename] + } + packageNames[filename] = cleanPackageName(path.Base(string(impPath))) + } + } + + // Consistency check: Every file with the same Go import path should have + // the same Go package name. + packageFiles := make(map[GoImportPath][]string) + for filename, importPath := range importPaths { + if _, ok := packageNames[filename]; !ok { + // Skip files mentioned in a M= parameter + // but which do not appear in the CodeGeneratorRequest. + continue + } + packageFiles[importPath] = append(packageFiles[importPath], filename) + } + for importPath, filenames := range packageFiles { + for i := 1; i < len(filenames); i++ { + if a, b := packageNames[filenames[0]], packageNames[filenames[i]]; a != b { + return nil, fmt.Errorf("Go package %v has inconsistent names %v (%v) and %v (%v)", + importPath, a, filenames[0], b, filenames[i]) + } + } + } + + // The extracted types from the full import set + typeRegistry := newExtensionRegistry() + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + if gen.FilesByPath[filename] != nil { + return nil, fmt.Errorf("duplicate file name: %q", filename) + } + f, err := newFile(gen, fdesc, packageNames[filename], importPaths[filename]) + if err != nil { + return nil, err + } + gen.Files = append(gen.Files, f) + gen.FilesByPath[filename] = f + if err = typeRegistry.registerAllExtensionsFromFile(f.Desc); err != nil { + return nil, err + } + } + for _, filename := range gen.Request.FileToGenerate { + f, ok := gen.FilesByPath[filename] + if !ok { + return nil, fmt.Errorf("no descriptor for generated file: %v", filename) + } + f.Generate = true + } + + // Create fully-linked descriptors if new extensions were found + if typeRegistry.hasNovelExtensions() { + for _, f := range gen.Files { + b, err := proto.Marshal(f.Proto.ProtoReflect().Interface()) + if err != nil { + return nil, err + } + err = proto.UnmarshalOptions{Resolver: typeRegistry}.Unmarshal(b, f.Proto) + if 
err != nil { + return nil, err + } + } + } + return gen, nil +} + +// Error records an error in code generation. The generator will report the +// error back to protoc and will not produce output. +func (gen *Plugin) Error(err error) { + if gen.err == nil { + gen.err = err + } +} + +// Response returns the generator output. +func (gen *Plugin) Response() *pluginpb.CodeGeneratorResponse { + resp := &pluginpb.CodeGeneratorResponse{} + if gen.err != nil { + resp.Error = proto.String(gen.err.Error()) + return resp + } + for _, g := range gen.genFiles { + if g.skip { + continue + } + content, err := g.Content() + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + filename := g.filename + if gen.module != "" { + trim := gen.module + "/" + if !strings.HasPrefix(filename, trim) { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(fmt.Sprintf("%v: generated file does not match prefix %q", filename, gen.module)), + } + } + filename = strings.TrimPrefix(filename, trim) + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename), + Content: proto.String(string(content)), + }) + if gen.annotateCode && strings.HasSuffix(g.filename, ".go") { + meta, err := g.metaFile(content) + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename + ".meta"), + Content: proto.String(meta), + }) + } + } + if gen.SupportedFeatures > 0 { + resp.SupportedFeatures = proto.Uint64(gen.SupportedFeatures) + } + return resp +} + +// A File describes a .proto source file. 
+type File struct { + Desc protoreflect.FileDescriptor + Proto *descriptorpb.FileDescriptorProto + + GoDescriptorIdent GoIdent // name of Go variable for the file descriptor + GoPackageName GoPackageName // name of this file's Go package + GoImportPath GoImportPath // import path of this file's Go package + + Enums []*Enum // top-level enum declarations + Messages []*Message // top-level message declarations + Extensions []*Extension // top-level extension declarations + Services []*Service // top-level service declarations + + Generate bool // true if we should generate code for this file + + // GeneratedFilenamePrefix is used to construct filenames for generated + // files associated with this source file. + // + // For example, the source file "dir/foo.proto" might have a filename prefix + // of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go". + GeneratedFilenamePrefix string + + location Location +} + +func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) { + desc, err := protodesc.NewFile(p, gen.fileReg) + if err != nil { + return nil, fmt.Errorf("invalid FileDescriptorProto %q: %v", p.GetName(), err) + } + if err := gen.fileReg.RegisterFile(desc); err != nil { + return nil, fmt.Errorf("cannot register descriptor %q: %v", p.GetName(), err) + } + f := &File{ + Desc: desc, + Proto: p, + GoPackageName: packageName, + GoImportPath: importPath, + location: Location{SourceFile: desc.Path()}, + } + + // Determine the prefix for generated Go files. + prefix := p.GetName() + if ext := path.Ext(prefix); ext == ".proto" || ext == ".protodevel" { + prefix = prefix[:len(prefix)-len(ext)] + } + switch gen.pathType { + case pathTypeImport: + // If paths=import, the output filename is derived from the Go import path. 
+ prefix = path.Join(string(f.GoImportPath), path.Base(prefix)) + case pathTypeSourceRelative: + // If paths=source_relative, the output filename is derived from + // the input filename. + } + f.GoDescriptorIdent = GoIdent{ + GoName: "File_" + strs.GoSanitized(p.GetName()), + GoImportPath: f.GoImportPath, + } + f.GeneratedFilenamePrefix = prefix + + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + f.Messages = append(f.Messages, newMessage(gen, f, nil, mds.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + f.Extensions = append(f.Extensions, newField(gen, f, nil, xds.Get(i))) + } + for i, sds := 0, desc.Services(); i < sds.Len(); i++ { + f.Services = append(f.Services, newService(gen, f, sds.Get(i))) + } + for _, message := range f.Messages { + if err := message.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, extension := range f.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, service := range f.Services { + for _, method := range service.Methods { + if err := method.resolveDependencies(gen); err != nil { + return nil, err + } + } + } + return f, nil +} + +// splitImportPathAndPackageName splits off the optional Go package name +// from the Go import path when separated by a ';' delimiter. +func splitImportPathAndPackageName(s string) (GoImportPath, GoPackageName) { + if i := strings.Index(s, ";"); i >= 0 { + return GoImportPath(s[:i]), GoPackageName(s[i+1:]) + } + return GoImportPath(s), "" +} + +// An Enum describes an enum. 
+type Enum struct { + Desc protoreflect.EnumDescriptor + + GoIdent GoIdent // name of the generated Go type + + Values []*EnumValue // enum value declarations + + Location Location // location of this enum + Comments CommentSet // comments associated with this enum +} + +func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(genid.DescriptorProto_EnumType_field_number, desc.Index()) + } else { + loc = f.location.appendPath(genid.FileDescriptorProto_EnumType_field_number, desc.Index()) + } + enum := &Enum{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + gen.enumsByName[desc.FullName()] = enum + for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ { + enum.Values = append(enum.Values, newEnumValue(gen, f, parent, enum, vds.Get(i))) + } + return enum +} + +// An EnumValue describes an enum value. +type EnumValue struct { + Desc protoreflect.EnumValueDescriptor + + GoIdent GoIdent // name of the generated Go declaration + + Parent *Enum // enum in which this value is declared + + Location Location // location of this enum value + Comments CommentSet // comments associated with this enum value +} + +func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc protoreflect.EnumValueDescriptor) *EnumValue { + // A top-level enum value's name is: EnumName_ValueName + // An enum value contained in a message is: MessageName_ValueName + // + // For historical reasons, enum value names are not camel-cased. 
+ parentIdent := enum.GoIdent + if message != nil { + parentIdent = message.GoIdent + } + name := parentIdent.GoName + "_" + string(desc.Name()) + loc := enum.Location.appendPath(genid.EnumDescriptorProto_Value_field_number, desc.Index()) + return &EnumValue{ + Desc: desc, + GoIdent: f.GoImportPath.Ident(name), + Parent: enum, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } +} + +// A Message describes a message. +type Message struct { + Desc protoreflect.MessageDescriptor + + GoIdent GoIdent // name of the generated Go type + + Fields []*Field // message field declarations + Oneofs []*Oneof // message oneof declarations + + Enums []*Enum // nested enum declarations + Messages []*Message // nested message declarations + Extensions []*Extension // nested extension declarations + + Location Location // location of this message + Comments CommentSet // comments associated with this message +} + +func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(genid.DescriptorProto_NestedType_field_number, desc.Index()) + } else { + loc = f.location.appendPath(genid.FileDescriptorProto_MessageType_field_number, desc.Index()) + } + message := &Message{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + gen.messagesByName[desc.FullName()] = message + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + message.Enums = append(message.Enums, newEnum(gen, f, message, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + message.Messages = append(message.Messages, newMessage(gen, f, message, mds.Get(i))) + } + for i, fds := 0, desc.Fields(); i < fds.Len(); i++ { + message.Fields = append(message.Fields, newField(gen, f, message, fds.Get(i))) + } + for i, ods := 0, desc.Oneofs(); i < ods.Len(); i++ { + 
message.Oneofs = append(message.Oneofs, newOneof(gen, f, message, ods.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + message.Extensions = append(message.Extensions, newField(gen, f, message, xds.Get(i))) + } + + // Resolve local references between fields and oneofs. + for _, field := range message.Fields { + if od := field.Desc.ContainingOneof(); od != nil { + oneof := message.Oneofs[od.Index()] + field.Oneof = oneof + oneof.Fields = append(oneof.Fields, field) + } + } + + // Field name conflict resolution. + // + // We assume well-known method names that may be attached to a generated + // message type, as well as a 'Get*' method for each field. For each + // field in turn, we add _s to its name until there are no conflicts. + // + // Any change to the following set of method names is a potential + // incompatible API change because it may change generated field names. + // + // TODO: If we ever support a 'go_name' option to set the Go name of a + // field, we should consider dropping this entirely. The conflict + // resolution algorithm is subtle and surprising (changing the order + // in which fields appear in the .proto source file can change the + // names of fields in generated code), and does not adapt well to + // adding new per-field methods such as setters. 
+ usedNames := map[string]bool{ + "Reset": true, + "String": true, + "ProtoMessage": true, + "Marshal": true, + "Unmarshal": true, + "ExtensionRangeArray": true, + "ExtensionMap": true, + "Descriptor": true, + } + makeNameUnique := func(name string, hasGetter bool) string { + for usedNames[name] || (hasGetter && usedNames["Get"+name]) { + name += "_" + } + usedNames[name] = true + usedNames["Get"+name] = hasGetter + return name + } + for _, field := range message.Fields { + field.GoName = makeNameUnique(field.GoName, true) + field.GoIdent.GoName = message.GoIdent.GoName + "_" + field.GoName + if field.Oneof != nil && field.Oneof.Fields[0] == field { + // Make the name for a oneof unique as well. For historical reasons, + // this assumes that a getter method is not generated for oneofs. + // This is incorrect, but fixing it breaks existing code. + field.Oneof.GoName = makeNameUnique(field.Oneof.GoName, false) + field.Oneof.GoIdent.GoName = message.GoIdent.GoName + "_" + field.Oneof.GoName + } + } + + // Oneof field name conflict resolution. + // + // This conflict resolution is incomplete as it does not consider collisions + // with other oneof field types, but fixing it breaks existing code. 
+ for _, field := range message.Fields { + if field.Oneof != nil { + Loop: + for { + for _, nestedMessage := range message.Messages { + if nestedMessage.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + for _, nestedEnum := range message.Enums { + if nestedEnum.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + break Loop + } + } + } + + return message +} + +func (message *Message) resolveDependencies(gen *Plugin) error { + for _, field := range message.Fields { + if err := field.resolveDependencies(gen); err != nil { + return err + } + } + for _, message := range message.Messages { + if err := message.resolveDependencies(gen); err != nil { + return err + } + } + for _, extension := range message.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return err + } + } + return nil +} + +// A Field describes a message field. +type Field struct { + Desc protoreflect.FieldDescriptor + + // GoName is the base name of this field's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "FieldName" + + // GoIdent is the base name of a top-level declaration for this field. + // For code generated by protoc-gen-go, this means a wrapper type named + // '{{GoIdent}}' for members fields of a oneof, and a variable named + // 'E_{{GoIdent}}' for extension fields. 
+ GoIdent GoIdent // e.g., "MessageName_FieldName" + + Parent *Message // message in which this field is declared; nil if top-level extension + Oneof *Oneof // containing oneof; nil if not part of a oneof + Extendee *Message // extended message for extension fields; nil otherwise + + Enum *Enum // type for enum fields; nil otherwise + Message *Message // type for message or group fields; nil otherwise + + Location Location // location of this field + Comments CommentSet // comments associated with this field +} + +func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDescriptor) *Field { + var loc Location + switch { + case desc.IsExtension() && message == nil: + loc = f.location.appendPath(genid.FileDescriptorProto_Extension_field_number, desc.Index()) + case desc.IsExtension() && message != nil: + loc = message.Location.appendPath(genid.DescriptorProto_Extension_field_number, desc.Index()) + default: + loc = message.Location.appendPath(genid.DescriptorProto_Field_field_number, desc.Index()) + } + camelCased := strs.GoCamelCase(string(desc.Name())) + var parentPrefix string + if message != nil { + parentPrefix = message.GoIdent.GoName + "_" + } + field := &Field{ + Desc: desc, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Parent: message, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + return field +} + +func (field *Field) resolveDependencies(gen *Plugin) error { + desc := field.Desc + switch desc.Kind() { + case protoreflect.EnumKind: + name := field.Desc.Enum().FullName() + enum, ok := gen.enumsByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for enum %v", desc.FullName(), name) + } + field.Enum = enum + case protoreflect.MessageKind, protoreflect.GroupKind: + name := desc.Message().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type 
%v", desc.FullName(), name) + } + field.Message = message + } + if desc.IsExtension() { + name := desc.ContainingMessage().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Extendee = message + } + return nil +} + +// A Oneof describes a message oneof. +type Oneof struct { + Desc protoreflect.OneofDescriptor + + // GoName is the base name of this oneof's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "OneofName" + + // GoIdent is the base name of a top-level declaration for this oneof. + GoIdent GoIdent // e.g., "MessageName_OneofName" + + Parent *Message // message in which this oneof is declared + + Fields []*Field // fields that are part of this oneof + + Location Location // location of this oneof + Comments CommentSet // comments associated with this oneof +} + +func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof { + loc := message.Location.appendPath(genid.DescriptorProto_OneofDecl_field_number, desc.Index()) + camelCased := strs.GoCamelCase(string(desc.Name())) + parentPrefix := message.GoIdent.GoName + "_" + return &Oneof{ + Desc: desc, + Parent: message, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } +} + +// Extension is an alias of Field for documentation. +type Extension = Field + +// A Service describes a service. 
+type Service struct { + Desc protoreflect.ServiceDescriptor + + GoName string + + Methods []*Method // service method declarations + + Location Location // location of this service + Comments CommentSet // comments associated with this service +} + +func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service { + loc := f.location.appendPath(genid.FileDescriptorProto_Service_field_number, desc.Index()) + service := &Service{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + for i, mds := 0, desc.Methods(); i < mds.Len(); i++ { + service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i))) + } + return service +} + +// A Method describes a method in a service. +type Method struct { + Desc protoreflect.MethodDescriptor + + GoName string + + Parent *Service // service in which this method is declared + + Input *Message + Output *Message + + Location Location // location of this method + Comments CommentSet // comments associated with this method +} + +func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method { + loc := service.Location.appendPath(genid.ServiceDescriptorProto_Method_field_number, desc.Index()) + method := &Method{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Parent: service, + Location: loc, + Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)), + } + return method +} + +func (method *Method) resolveDependencies(gen *Plugin) error { + desc := method.Desc + + inName := desc.Input().FullName() + in, ok := gen.messagesByName[inName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), inName) + } + method.Input = in + + outName := desc.Output().FullName() + out, ok := gen.messagesByName[outName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), outName) + 
} + method.Output = out + + return nil +} + +// A GeneratedFile is a generated file. +type GeneratedFile struct { + gen *Plugin + skip bool + filename string + goImportPath GoImportPath + buf bytes.Buffer + packageNames map[GoImportPath]GoPackageName + usedPackageNames map[GoPackageName]bool + manualImports map[GoImportPath]bool + annotations map[string][]Location +} + +// NewGeneratedFile creates a new generated file with the given filename +// and import path. +func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) *GeneratedFile { + g := &GeneratedFile{ + gen: gen, + filename: filename, + goImportPath: goImportPath, + packageNames: make(map[GoImportPath]GoPackageName), + usedPackageNames: make(map[GoPackageName]bool), + manualImports: make(map[GoImportPath]bool), + annotations: make(map[string][]Location), + } + + // All predeclared identifiers in Go are already used. + for _, s := range types.Universe.Names() { + g.usedPackageNames[GoPackageName(s)] = true + } + + gen.genFiles = append(gen.genFiles, g) + return g +} + +// P prints a line to the generated output. It converts each parameter to a +// string following the same rules as fmt.Print. It never inserts spaces +// between parameters. +func (g *GeneratedFile) P(v ...interface{}) { + for _, x := range v { + switch x := x.(type) { + case GoIdent: + fmt.Fprint(&g.buf, g.QualifiedGoIdent(x)) + default: + fmt.Fprint(&g.buf, x) + } + } + fmt.Fprintln(&g.buf) +} + +// QualifiedGoIdent returns the string to use for a Go identifier. +// +// If the identifier is from a different Go package than the generated file, +// the returned name will be qualified (package.name) and an import statement +// for the identifier's package will be included in the file. +func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string { + if ident.GoImportPath == g.goImportPath { + return ident.GoName + } + if packageName, ok := g.packageNames[ident.GoImportPath]; ok { + return string(packageName) + "." 
+ ident.GoName + } + packageName := cleanPackageName(path.Base(string(ident.GoImportPath))) + for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ { + packageName = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[ident.GoImportPath] = packageName + g.usedPackageNames[packageName] = true + return string(packageName) + "." + ident.GoName +} + +// Import ensures a package is imported by the generated file. +// +// Packages referenced by QualifiedGoIdent are automatically imported. +// Explicitly importing a package with Import is generally only necessary +// when the import will be blank (import _ "package"). +func (g *GeneratedFile) Import(importPath GoImportPath) { + g.manualImports[importPath] = true +} + +// Write implements io.Writer. +func (g *GeneratedFile) Write(p []byte) (n int, err error) { + return g.buf.Write(p) +} + +// Skip removes the generated file from the plugin output. +func (g *GeneratedFile) Skip() { + g.skip = true +} + +// Unskip reverts a previous call to Skip, re-including the generated file in +// the plugin output. +func (g *GeneratedFile) Unskip() { + g.skip = false +} + +// Annotate associates a symbol in a generated Go file with a location in a +// source .proto file. +// +// The symbol may refer to a type, constant, variable, function, method, or +// struct field. The "T.sel" syntax is used to identify the method or field +// 'sel' on type 'T'. +func (g *GeneratedFile) Annotate(symbol string, loc Location) { + g.annotations[symbol] = append(g.annotations[symbol], loc) +} + +// Content returns the contents of the generated file. +func (g *GeneratedFile) Content() ([]byte, error) { + if !strings.HasSuffix(g.filename, ".go") { + return g.buf.Bytes(), nil + } + + // Reformat generated code. + original := g.buf.Bytes() + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. 
+ // This should never happen in practice, but it can while changing generated code + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + return nil, fmt.Errorf("%v: unparsable Go source: %v\n%v", g.filename, err, src.String()) + } + + // Collect a sorted list of all imports. + var importPaths [][2]string + rewriteImport := func(importPath string) string { + if f := g.gen.opts.ImportRewriteFunc; f != nil { + return string(f(GoImportPath(importPath))) + } + return importPath + } + for importPath := range g.packageNames { + pkgName := string(g.packageNames[GoImportPath(importPath)]) + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{pkgName, pkgPath}) + } + for importPath := range g.manualImports { + if _, ok := g.packageNames[importPath]; !ok { + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{"_", pkgPath}) + } + } + sort.Slice(importPaths, func(i, j int) bool { + return importPaths[i][1] < importPaths[j][1] + }) + + // Modify the AST to include a new import block. + if len(importPaths) > 0 { + // Insert block after package statement or + // possible comment attached to the end of the package statement. + pos := file.Package + tokFile := fset.File(file.Package) + pkgLine := tokFile.Line(file.Package) + for _, c := range file.Comments { + if tokFile.Line(c.Pos()) > pkgLine { + break + } + pos = c.End() + } + + // Construct the import block. 
+ impDecl := &ast.GenDecl{ + Tok: token.IMPORT, + TokPos: pos, + Lparen: pos, + Rparen: pos, + } + for _, importPath := range importPaths { + impDecl.Specs = append(impDecl.Specs, &ast.ImportSpec{ + Name: &ast.Ident{ + Name: importPath[0], + NamePos: pos, + }, + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(importPath[1]), + ValuePos: pos, + }, + EndPos: pos, + }) + } + file.Decls = append([]ast.Decl{impDecl}, file.Decls...) + } + + var out bytes.Buffer + if err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(&out, fset, file); err != nil { + return nil, fmt.Errorf("%v: can not reformat Go source: %v", g.filename, err) + } + return out.Bytes(), nil +} + +// metaFile returns the contents of the file's metadata file, which is a +// text formatted string of the google.protobuf.GeneratedCodeInfo. +func (g *GeneratedFile) metaFile(content []byte) (string, error) { + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", content, 0) + if err != nil { + return "", err + } + info := &descriptorpb.GeneratedCodeInfo{} + + seenAnnotations := make(map[string]bool) + annotate := func(s string, ident *ast.Ident) { + seenAnnotations[s] = true + for _, loc := range g.annotations[s] { + info.Annotation = append(info.Annotation, &descriptorpb.GeneratedCodeInfo_Annotation{ + SourceFile: proto.String(loc.SourceFile), + Path: loc.Path, + Begin: proto.Int32(int32(fset.Position(ident.Pos()).Offset)), + End: proto.Int32(int32(fset.Position(ident.End()).Offset)), + }) + } + } + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + annotate(spec.Name.Name, spec.Name) + switch st := spec.Type.(type) { + case *ast.StructType: + for _, field := range st.Fields.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + case *ast.InterfaceType: + for _, 
field := range st.Methods.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + } + case *ast.ValueSpec: + for _, name := range spec.Names { + annotate(name.Name, name) + } + } + } + case *ast.FuncDecl: + if decl.Recv == nil { + annotate(decl.Name.Name, decl.Name) + } else { + recv := decl.Recv.List[0].Type + if s, ok := recv.(*ast.StarExpr); ok { + recv = s.X + } + if id, ok := recv.(*ast.Ident); ok { + annotate(id.Name+"."+decl.Name.Name, decl.Name) + } + } + } + } + for a := range g.annotations { + if !seenAnnotations[a] { + return "", fmt.Errorf("%v: no symbol matching annotation %q", g.filename, a) + } + } + + b, err := prototext.Marshal(info) + if err != nil { + return "", err + } + return string(b), nil +} + +// A GoIdent is a Go identifier, consisting of a name and import path. +// The name is a single identifier and may not be a dot-qualified selector. +type GoIdent struct { + GoName string + GoImportPath GoImportPath +} + +func (id GoIdent) String() string { return fmt.Sprintf("%q.%v", id.GoImportPath, id.GoName) } + +// newGoIdent returns the Go identifier for a descriptor. +func newGoIdent(f *File, d protoreflect.Descriptor) GoIdent { + name := strings.TrimPrefix(string(d.FullName()), string(f.Desc.Package())+".") + return GoIdent{ + GoName: strs.GoCamelCase(name), + GoImportPath: f.GoImportPath, + } +} + +// A GoImportPath is the import path of a Go package. +// For example: "google.golang.org/protobuf/compiler/protogen" +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// Ident returns a GoIdent with s as the GoName and p as the GoImportPath. +func (p GoImportPath) Ident(s string) GoIdent { + return GoIdent{GoName: s, GoImportPath: p} +} + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// cleanPackageName converts a string to a valid Go package name. 
+func cleanPackageName(name string) GoPackageName { + return GoPackageName(strs.GoSanitized(name)) +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// A Location is a location in a .proto source file. +// +// See the google.protobuf.SourceCodeInfo documentation in descriptor.proto +// for details. +type Location struct { + SourceFile string + Path protoreflect.SourcePath +} + +// appendPath add elements to a Location's path, returning a new Location. +func (loc Location) appendPath(num protoreflect.FieldNumber, idx int) Location { + loc.Path = append(protoreflect.SourcePath(nil), loc.Path...) // make copy + loc.Path = append(loc.Path, int32(num), int32(idx)) + return loc +} + +// CommentSet is a set of leading and trailing comments associated +// with a .proto descriptor declaration. +type CommentSet struct { + LeadingDetached []Comments + Leading Comments + Trailing Comments +} + +func makeCommentSet(loc protoreflect.SourceLocation) CommentSet { + var leadingDetached []Comments + for _, s := range loc.LeadingDetachedComments { + leadingDetached = append(leadingDetached, Comments(s)) + } + return CommentSet{ + LeadingDetached: leadingDetached, + Leading: Comments(loc.LeadingComments), + Trailing: Comments(loc.TrailingComments), + } +} + +// Comments is a comments string as provided by protoc. +type Comments string + +// String formats the comments by inserting // to the start of each line, +// ensuring that there is a trailing newline. +// An empty comment is formatted as an empty string. +func (c Comments) String() string { + if c == "" { + return "" + } + var b []byte + for _, line := range strings.Split(strings.TrimSuffix(string(c), "\n"), "\n") { + b = append(b, "//"...) + b = append(b, line...) + b = append(b, "\n"...) + } + return string(b) +} + +// extensionRegistry allows registration of new extensions defined in the .proto +// file for which we are generating bindings. 
+// +// Lookups consult the local type registry first and fall back to the base type +// registry which defaults to protoregistry.GlobalTypes +type extensionRegistry struct { + base *protoregistry.Types + local *protoregistry.Types +} + +func newExtensionRegistry() *extensionRegistry { + return &extensionRegistry{ + base: protoregistry.GlobalTypes, + local: &protoregistry.Types{}, + } +} + +// FindExtensionByName implements proto.UnmarshalOptions.FindExtensionByName +func (e *extensionRegistry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xt, err := e.local.FindExtensionByName(field); err == nil { + return xt, nil + } + + return e.base.FindExtensionByName(field) +} + +// FindExtensionByNumber implements proto.UnmarshalOptions.FindExtensionByNumber +func (e *extensionRegistry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xt, err := e.local.FindExtensionByNumber(message, field); err == nil { + return xt, nil + } + + return e.base.FindExtensionByNumber(message, field) +} + +func (e *extensionRegistry) hasNovelExtensions() bool { + return e.local.NumExtensions() > 0 +} + +func (e *extensionRegistry) registerAllExtensionsFromFile(f protoreflect.FileDescriptor) error { + if err := e.registerAllExtensions(f.Extensions()); err != nil { + return err + } + return nil +} + +func (e *extensionRegistry) registerAllExtensionsFromMessage(ms protoreflect.MessageDescriptors) error { + for i := 0; i < ms.Len(); i++ { + m := ms.Get(i) + if err := e.registerAllExtensions(m.Extensions()); err != nil { + return err + } + } + return nil +} + +func (e *extensionRegistry) registerAllExtensions(exts protoreflect.ExtensionDescriptors) error { + for i := 0; i < exts.Len(); i++ { + if err := e.registerExtension(exts.Get(i)); err != nil { + return err + } + } + return nil +} + +// registerExtension adds the given extension to the type registry if an +// extension 
with that full name does not exist yet. +func (e *extensionRegistry) registerExtension(xd protoreflect.ExtensionDescriptor) error { + if _, err := e.FindExtensionByName(xd.FullName()); err != protoregistry.NotFound { + // Either the extension already exists or there was an error, either way we're done. + return err + } + return e.local.RegisterExtension(dynamicpb.NewExtensionType(xd)) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 00ea2fec..21d5d2cb 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -4,7 +4,7 @@ // Package protojson marshals and unmarshals protocol buffer messages as JSON // format. It follows the guide at -// https://developers.google.com/protocol-buffers/docs/proto3#json. +// https://protobuf.dev/programming-guides/proto3#json. // // This package produces a different output than the standard "encoding/json" // package, which does not operate correctly on protocol buffer messages. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index c85f8469..6c37d417 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { return d.unexpectedTokenError(tok) } - t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) if err != nil { return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) } - // Validate seconds. No need to validate nanos because time.Parse would have - // covered that already. + // Validate seconds. 
secs := t.Unix() if secs < minTimestampSeconds || secs > maxTimestampSeconds { return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) } + // Validate subseconds. + i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index ce57f57e..f4b4686c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package protowire parses and formats the raw wire encoding. -// See https://developers.google.com/protocol-buffers/docs/encoding. +// See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, // use the "google.golang.org/protobuf/proto" package instead. @@ -29,12 +29,8 @@ const ( ) // IsValid reports whether the field number is semantically valid. -// -// Note that while numbers within the reserved range are semantically invalid, -// they are syntactically valid in the wire format. -// Implementations may treat records with reserved field numbers as unknown. func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber + return MinValidNumber <= n && n <= MaxValidNumber } // Type represents the wire type. 
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index b13fd29e..d043a6eb 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool { } // consumeToken constructs a Token for given Kind with raw value derived from -// current d.in and given size, and consumes the given size-lenght of it. +// current d.in and given size, and consumes the given size-length of it. func (d *Decoder) consumeToken(kind Kind, size int) Token { tok := Token{ kind: kind, diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index 427c62d0..87853e78 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { // Field number. Identify if input is a valid number that is not negative // and is decimal integer within 32-bit range. 
if num := parseNumber(d.in); num.size > 0 { + str := num.string(d.in) if !num.neg && num.kind == numDec { - if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + if _, err := strconv.ParseInt(str, 10, 32); err == nil { return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil } } - return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + return Token{}, d.newSyntaxError("invalid field number: %s", str) } return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index 81a5d8c8..45c81f02 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) { if num.neg { numAttrs |= isNegative } - strSize := num.size - last := num.size - 1 - if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { - strSize = last - } tok := Token{ kind: Scalar, attrs: numberValue, pos: len(d.orig) - len(d.in), raw: d.in[:num.size], - str: string(d.in[:strSize]), + str: num.string(d.in), numAttrs: numAttrs, } d.consume(num.size) @@ -46,6 +41,27 @@ type number struct { kind uint8 neg bool size int + // if neg, this is the length of whitespace and comments between + // the minus sign and the rest fo the number literal + sep int +} + +func (num number) string(data []byte) string { + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') { + strSize = last + } + if num.neg && num.sep > 0 { + // strip whitespace/comments between negative sign and the rest + strLen := strSize - num.sep + str := make([]byte, strLen) + str[0] = data[0] + copy(str[1:], data[num.sep+1:strSize]) + return string(str) + } + return string(data[:strSize]) 
+ } // parseNumber constructs a number object from given input. It allows for the @@ -67,19 +83,22 @@ func parseNumber(input []byte) number { } // Optional - + var sep int if s[0] == '-' { neg = true s = s[1:] size++ + // Consume any whitespace or comments between the + // negative sign and the rest of the number + lenBefore := len(s) + s = consume(s, 0) + sep = lenBefore - len(s) + size += sep if len(s) == 0 { return number{} } } - // C++ allows for whitespace and comments in between the negative sign and - // the rest of the number. This logic currently does not but is consistent - // with v1. - switch { case s[0] == '0': if len(s) > 1 { @@ -116,7 +135,7 @@ func parseNumber(input []byte) number { if len(s) > 0 && !isDelim(s[0]) { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } } s = s[1:] @@ -188,5 +207,5 @@ func parseNumber(input []byte) number { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index e3cdf1c2..5c0e8f73 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -50,6 +50,7 @@ const ( FileDescriptorProto_Options_field_name protoreflect.Name = "options" FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" @@ -63,6 +64,7 @@ const ( 
FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" ) // Field numbers for google.protobuf.FileDescriptorProto. @@ -79,6 +81,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 ) // Names for google.protobuf.DescriptorProto. @@ -494,26 +497,29 @@ const ( // Field names for google.protobuf.MessageOptions. const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - 
MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) // Field numbers for google.protobuf.MessageOptions. 
const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.FieldOptions. @@ -528,16 +534,24 @@ const ( FieldOptions_Packed_field_name protoreflect.Name = "packed" FieldOptions_Jstype_field_name protoreflect.Name = "jstype" FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" FieldOptions_Lazy_field_fullname protoreflect.FullName = 
"google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -547,8 +561,12 @@ const ( FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -564,6 +582,18 @@ const ( FieldOptions_JSType_enum_name = "JSType" ) +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. 
+const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -590,20 +620,23 @@ const ( // Field names for google.protobuf.EnumOptions. const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumOptions. 
const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.EnumValueOptions. @@ -813,11 +846,13 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" ) // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. 
@@ -826,4 +861,11 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. +const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 11a6128b..185ef2ef 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { default: return newSingularConverter(t, fd) } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } var ( diff --git a/vendor/google.golang.org/protobuf/internal/msgfmt/format.go b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go new file mode 100644 index 00000000..a319550f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go @@ -0,0 +1,261 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package msgfmt implements a text marshaler combining the desirable features +// of both the JSON and proto text formats. +// It is optimized for human readability and has no associated deserializer. 
+package msgfmt + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Format returns a formatted string for the message. +func Format(m proto.Message) string { + return string(appendMessage(nil, m.ProtoReflect())) +} + +// FormatValue returns a formatted string for an arbitrary value. +func FormatValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) string { + return string(appendValue(nil, v, fd)) +} + +func appendValue(b []byte, v protoreflect.Value, fd protoreflect.FieldDescriptor) []byte { + switch v := v.Interface().(type) { + case nil: + return append(b, ""...) + case bool, int32, int64, uint32, uint64, float32, float64: + return append(b, fmt.Sprint(v)...) + case string: + return append(b, strconv.Quote(string(v))...) + case []byte: + return append(b, strconv.Quote(string(v))...) + case protoreflect.EnumNumber: + return appendEnum(b, v, fd) + case protoreflect.Message: + return appendMessage(b, v) + case protoreflect.List: + return appendList(b, v, fd) + case protoreflect.Map: + return appendMap(b, v, fd) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +func appendEnum(b []byte, v protoreflect.EnumNumber, fd protoreflect.FieldDescriptor) []byte { + if fd != nil { + if ev := fd.Enum().Values().ByNumber(v); ev != nil { + return append(b, ev.Name()...) 
+ } + } + return strconv.AppendInt(b, int64(v), 10) +} + +func appendMessage(b []byte, m protoreflect.Message) []byte { + if b2 := appendKnownMessage(b, m); b2 != nil { + return b2 + } + + b = append(b, '{') + order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b = append(b, fd.TextName()...) + b = append(b, ':') + b = appendValue(b, v, fd) + b = append(b, delim()...) + return true + }) + b = appendUnknown(b, m.GetUnknown()) + b = bytes.TrimRight(b, delim()) + b = append(b, '}') + return b +} + +var protocmpMessageType = reflect.TypeOf(map[string]interface{}(nil)) + +func appendKnownMessage(b []byte, m protoreflect.Message) []byte { + md := m.Descriptor() + fds := md.Fields() + switch md.FullName() { + case genid.Any_message_fullname: + var msgVal protoreflect.Message + url := m.Get(fds.ByNumber(genid.Any_TypeUrl_field_number)).String() + if v := reflect.ValueOf(m); v.Type().ConvertibleTo(protocmpMessageType) { + // For protocmp.Message, directly obtain the sub-message value + // which is stored in structured form, rather than as raw bytes. + m2 := v.Convert(protocmpMessageType).Interface().(map[string]interface{}) + v, ok := m2[string(genid.Any_Value_field_name)].(proto.Message) + if !ok { + return nil + } + msgVal = v.ProtoReflect() + } else { + val := m.Get(fds.ByNumber(genid.Any_Value_field_number)).Bytes() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return nil + } + msgVal = mt.New() + err = proto.UnmarshalOptions{AllowPartial: true}.Unmarshal(val, msgVal.Interface()) + if err != nil { + return nil + } + } + + b = append(b, '{') + b = append(b, "["+url+"]"...) 
+ b = append(b, ':') + b = appendMessage(b, msgVal) + b = append(b, '}') + return b + + case genid.Timestamp_message_fullname: + secs := m.Get(fds.ByNumber(genid.Timestamp_Seconds_field_number)).Int() + nanos := m.Get(fds.ByNumber(genid.Timestamp_Nanos_field_number)).Int() + if nanos < 0 || nanos >= 1e9 { + return nil + } + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") // RFC 3339 + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + return append(b, x+"Z"...) + + case genid.Duration_message_fullname: + sign := "" + secs := m.Get(fds.ByNumber(genid.Duration_Seconds_field_number)).Int() + nanos := m.Get(fds.ByNumber(genid.Duration_Nanos_field_number)).Int() + if nanos <= -1e9 || nanos >= 1e9 || (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return nil + } + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + return append(b, x+"s"...) 
+ + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + fd := fds.ByNumber(genid.WrapperValue_Value_field_number) + return appendValue(b, m.Get(fd), fd) + } + + return nil +} + +func appendUnknown(b []byte, raw protoreflect.RawFields) []byte { + rs := make(map[protoreflect.FieldNumber][]protoreflect.RawFields) + for len(raw) > 0 { + num, _, n := protowire.ConsumeField(raw) + rs[num] = append(rs[num], raw[:n]) + raw = raw[n:] + } + + var ns []protoreflect.FieldNumber + for n := range rs { + ns = append(ns, n) + } + sort.Slice(ns, func(i, j int) bool { return ns[i] < ns[j] }) + + for _, n := range ns { + var leftBracket, rightBracket string + if len(rs[n]) > 1 { + leftBracket, rightBracket = "[", "]" + } + + b = strconv.AppendInt(b, int64(n), 10) + b = append(b, ':') + b = append(b, leftBracket...) + for _, r := range rs[n] { + num, typ, n := protowire.ConsumeTag(r) + r = r[n:] + switch typ { + case protowire.VarintType: + v, _ := protowire.ConsumeVarint(r) + b = strconv.AppendInt(b, int64(v), 10) + case protowire.Fixed32Type: + v, _ := protowire.ConsumeFixed32(r) + b = append(b, fmt.Sprintf("0x%08x", v)...) + case protowire.Fixed64Type: + v, _ := protowire.ConsumeFixed64(r) + b = append(b, fmt.Sprintf("0x%016x", v)...) + case protowire.BytesType: + v, _ := protowire.ConsumeBytes(r) + b = strconv.AppendQuote(b, string(v)) + case protowire.StartGroupType: + v, _ := protowire.ConsumeGroup(num, r) + b = append(b, '{') + b = appendUnknown(b, v) + b = bytes.TrimRight(b, delim()) + b = append(b, '}') + default: + panic(fmt.Sprintf("invalid type: %v", typ)) + } + b = append(b, delim()...) + } + b = bytes.TrimRight(b, delim()) + b = append(b, rightBracket...) + b = append(b, delim()...) 
+ } + return b +} + +func appendList(b []byte, v protoreflect.List, fd protoreflect.FieldDescriptor) []byte { + b = append(b, '[') + for i := 0; i < v.Len(); i++ { + b = appendValue(b, v.Get(i), fd) + b = append(b, delim()...) + } + b = bytes.TrimRight(b, delim()) + b = append(b, ']') + return b +} + +func appendMap(b []byte, v protoreflect.Map, fd protoreflect.FieldDescriptor) []byte { + b = append(b, '{') + order.RangeEntries(v, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + b = appendValue(b, k.Value(), fd.MapKey()) + b = append(b, ':') + b = appendValue(b, v, fd.MapValue()) + b = append(b, delim()...) + return true + }) + b = bytes.TrimRight(b, delim()) + b = append(b, '}') + return b +} + +func delim() string { + // Deliberately introduce instability into the message string to + // discourage users from depending on it. + if detrand.Bool() { + return " " + } + return ", " +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index fea589c4..61a84d34 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) { // Unlike strings.Builder, we do not need to copy over the contents // of the old buffer since our builder provides no API for // retrieving previously created strings. - sb.buf = make([]byte, 2*(cap(sb.buf)+n)) + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) } func (sb *Builder) last(n int) string { diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index b480c501..f7014cd5 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. 
const ( Major = 1 - Minor = 28 - Patch = 1 + Minor = 30 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index 08d2a46f..ec71e717 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -5,16 +5,13 @@ // Package proto provides functions operating on protocol buffer messages. // // For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers +// https://protobuf.dev. // // For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://protobuf.dev/getting-started/gotutorial. // // For a guide to generated Go protocol buffer code, see: -// -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://protobuf.dev/reference/go/go-generated. // // # Binary serialization // diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 67948dd1..1a0be1b0 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -5,30 +5,39 @@ package proto import ( - "bytes" - "math" "reflect" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/reflect/protoreflect" ) -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message. // -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. 
+// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. // -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil @@ -42,130 +51,7 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. 
-func equalMessage(mx, my protoreflect.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. -func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. -func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. 
-func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch fd.Kind() { - case protoreflect.BoolKind: - return x.Bool() == y.Bool() - case protoreflect.EnumKind: - return x.Enum() == y.Enum() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, - protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: - return x.Int() == y.Int() - case protoreflect.Uint32Kind, protoreflect.Uint64Kind, - protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: - return x.Uint() == y.Uint() - case protoreflect.FloatKind, protoreflect.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case protoreflect.StringKind: - return x.String() == y.String() - case protoreflect.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case protoreflect.MessageKind, protoreflect.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y protoreflect.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) 
- y = y[n:] - } - return reflect.DeepEqual(mx, my) + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) } diff --git a/vendor/google.golang.org/protobuf/reflect/protopath/path.go b/vendor/google.golang.org/protobuf/reflect/protopath/path.go new file mode 100644 index 00000000..91562a82 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protopath/path.go @@ -0,0 +1,122 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protopath provides functionality for +// representing a sequence of protobuf reflection operations on a message. +package protopath + +import ( + "fmt" + + "google.golang.org/protobuf/internal/msgfmt" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// NOTE: The Path and Values are separate types here since there are use cases +// where you would like to "address" some value in a message with just the path +// and don't have the value information available. +// +// This is different from how "github.com/google/go-cmp/cmp".Path operates, +// which combines both path and value information together. +// Since the cmp package itself is the only one ever constructing a cmp.Path, +// it will always have the value available. + +// Path is a sequence of protobuf reflection steps applied to some root +// protobuf message value to arrive at the current value. +// The first step must be a Root step. +type Path []Step + +// TODO: Provide a Parse function that parses something similar to or +// perhaps identical to the output of Path.String. + +// Index returns the ith step in the path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// It returns a zero Step value if the index is out-of-bounds. 
+func (p Path) Index(i int) Step { + if i < 0 { + i = len(p) + i + } + if i < 0 || i >= len(p) { + return Step{} + } + return p[i] +} + +// String returns a structured representation of the path +// by concatenating the string representation of every path step. +func (p Path) String() string { + var b []byte + for _, s := range p { + b = s.appendString(b) + } + return string(b) +} + +// Values is a Path paired with a sequence of values at each step. +// The lengths of Path and Values must be identical. +// The first step must be a Root step and +// the first value must be a concrete message value. +type Values struct { + Path Path + Values []protoreflect.Value +} + +// Len reports the length of the path and values. +// If the path and values have differing length, it returns the minimum length. +func (p Values) Len() int { + n := len(p.Path) + if n > len(p.Values) { + n = len(p.Values) + } + return n +} + +// Index returns the ith step and value and supports negative indexing. +// A negative index starts counting from the tail of the Values such that -1 +// refers to the last pair, -2 refers to the second-to-last pair, and so on. +func (p Values) Index(i int) (out struct { + Step Step + Value protoreflect.Value +}) { + // NOTE: This returns a single struct instead of two return values so that + // callers can make use of the the value in an expression: + // vs.Index(i).Value.Interface() + n := p.Len() + if i < 0 { + i = n + i + } + if i < 0 || i >= n { + return out + } + out.Step = p.Path[i] + out.Value = p.Values[i] + return out +} + +// String returns a humanly readable representation of the path and last value. +// Do not depend on the output being stable. +// +// For example: +// +// (path.to.MyMessage).list_field[5].map_field["hello"] = {hello: "world"} +func (p Values) String() string { + n := p.Len() + if n == 0 { + return "" + } + + // Determine the field descriptor associated with the last step. 
+ var fd protoreflect.FieldDescriptor + last := p.Index(-1) + switch last.Step.kind { + case FieldAccessStep: + fd = last.Step.FieldDescriptor() + case MapIndexStep, ListIndexStep: + fd = p.Index(-2).Step.FieldDescriptor() + } + + // Format the full path with the last value. + return fmt.Sprintf("%v = %v", p.Path[:n], msgfmt.FormatValue(last.Value, fd)) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protopath/step.go b/vendor/google.golang.org/protobuf/reflect/protopath/step.go new file mode 100644 index 00000000..95ae85c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protopath/step.go @@ -0,0 +1,241 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protopath + +import ( + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// StepKind identifies the kind of step operation. +// Each kind of step corresponds with some protobuf reflection operation. +type StepKind int + +const ( + invalidStep StepKind = iota + // RootStep identifies a step as the Root step operation. + RootStep + // FieldAccessStep identifies a step as the FieldAccess step operation. + FieldAccessStep + // UnknownAccessStep identifies a step as the UnknownAccess step operation. + UnknownAccessStep + // ListIndexStep identifies a step as the ListIndex step operation. + ListIndexStep + // MapIndexStep identifies a step as the MapIndex step operation. + MapIndexStep + // AnyExpandStep identifies a step as the AnyExpand step operation. 
+ AnyExpandStep +) + +func (k StepKind) String() string { + switch k { + case invalidStep: + return "" + case RootStep: + return "Root" + case FieldAccessStep: + return "FieldAccess" + case UnknownAccessStep: + return "UnknownAccess" + case ListIndexStep: + return "ListIndex" + case MapIndexStep: + return "MapIndex" + case AnyExpandStep: + return "AnyExpand" + default: + return fmt.Sprintf("", k) + } +} + +// Step is a union where only one step operation may be specified at a time. +// The different kinds of steps are specified by the constants defined for +// the StepKind type. +type Step struct { + kind StepKind + desc protoreflect.Descriptor + key protoreflect.Value +} + +// Root indicates the root message that a path is relative to. +// It should always (and only ever) be the first step in a path. +func Root(md protoreflect.MessageDescriptor) Step { + if md == nil { + panic("nil message descriptor") + } + return Step{kind: RootStep, desc: md} +} + +// FieldAccess describes access of a field within a message. +// Extension field accesses are also represented using a FieldAccess and +// must be provided with a protoreflect.FieldDescriptor +// +// Within the context of Values, +// the type of the previous step value is always a message, and +// the type of the current step value is determined by the field descriptor. +func FieldAccess(fd protoreflect.FieldDescriptor) Step { + if fd == nil { + panic("nil field descriptor") + } else if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok && fd.IsExtension() { + panic(fmt.Sprintf("extension field %q must implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return Step{kind: FieldAccessStep, desc: fd} +} + +// UnknownAccess describes access to the unknown fields within a message. +// +// Within the context of Values, +// the type of the previous step value is always a message, and +// the type of the current step value is always a bytes type. 
+func UnknownAccess() Step { + return Step{kind: UnknownAccessStep} +} + +// ListIndex describes index of an element within a list. +// +// Within the context of Values, +// the type of the previous, previous step value is always a message, +// the type of the previous step value is always a list, and +// the type of the current step value is determined by the field descriptor. +func ListIndex(i int) Step { + if i < 0 { + panic(fmt.Sprintf("invalid list index: %v", i)) + } + return Step{kind: ListIndexStep, key: protoreflect.ValueOfInt64(int64(i))} +} + +// MapIndex describes index of an entry within a map. +// The key type is determined by field descriptor that the map belongs to. +// +// Within the context of Values, +// the type of the previous previous step value is always a message, +// the type of the previous step value is always a map, and +// the type of the current step value is determined by the field descriptor. +func MapIndex(k protoreflect.MapKey) Step { + if !k.IsValid() { + panic("invalid map index") + } + return Step{kind: MapIndexStep, key: k.Value()} +} + +// AnyExpand describes expansion of a google.protobuf.Any message into +// a structured representation of the underlying message. +// +// Within the context of Values, +// the type of the previous step value is always a google.protobuf.Any message, and +// the type of the current step value is always a message. +func AnyExpand(md protoreflect.MessageDescriptor) Step { + if md == nil { + panic("nil message descriptor") + } + return Step{kind: AnyExpandStep, desc: md} +} + +// MessageDescriptor returns the message descriptor for Root or AnyExpand steps, +// otherwise it returns nil. +func (s Step) MessageDescriptor() protoreflect.MessageDescriptor { + switch s.kind { + case RootStep, AnyExpandStep: + return s.desc.(protoreflect.MessageDescriptor) + default: + return nil + } +} + +// FieldDescriptor returns the field descriptor for FieldAccess steps, +// otherwise it returns nil. 
+func (s Step) FieldDescriptor() protoreflect.FieldDescriptor { + switch s.kind { + case FieldAccessStep: + return s.desc.(protoreflect.FieldDescriptor) + default: + return nil + } +} + +// ListIndex returns the list index for ListIndex steps, +// otherwise it returns 0. +func (s Step) ListIndex() int { + switch s.kind { + case ListIndexStep: + return int(s.key.Int()) + default: + return 0 + } +} + +// MapIndex returns the map key for MapIndex steps, +// otherwise it returns an invalid map key. +func (s Step) MapIndex() protoreflect.MapKey { + switch s.kind { + case MapIndexStep: + return s.key.MapKey() + default: + return protoreflect.MapKey{} + } +} + +// Kind reports which kind of step this is. +func (s Step) Kind() StepKind { + return s.kind +} + +func (s Step) String() string { + return string(s.appendString(nil)) +} + +func (s Step) appendString(b []byte) []byte { + switch s.kind { + case RootStep: + b = append(b, '(') + b = append(b, s.desc.FullName()...) + b = append(b, ')') + case FieldAccessStep: + b = append(b, '.') + if fd := s.desc.(protoreflect.FieldDescriptor); fd.IsExtension() { + b = append(b, '(') + b = append(b, strings.Trim(fd.TextName(), "[]")...) + b = append(b, ')') + } else { + b = append(b, fd.TextName()...) 
+ } + case UnknownAccessStep: + b = append(b, '.') + b = append(b, '?') + case ListIndexStep: + b = append(b, '[') + b = strconv.AppendInt(b, s.key.Int(), 10) + b = append(b, ']') + case MapIndexStep: + b = append(b, '[') + switch k := s.key.Interface().(type) { + case bool: + b = strconv.AppendBool(b, bool(k)) // e.g., "true" or "false" + case int32: + b = strconv.AppendInt(b, int64(k), 10) // e.g., "-32" + case int64: + b = strconv.AppendInt(b, int64(k), 10) // e.g., "-64" + case uint32: + b = strconv.AppendUint(b, uint64(k), 10) // e.g., "32" + case uint64: + b = strconv.AppendUint(b, uint64(k), 10) // e.g., "64" + case string: + b = text.AppendString(b, k) // e.g., `"hello, world"` + } + b = append(b, ']') + case AnyExpandStep: + b = append(b, '.') + b = append(b, '(') + b = append(b, s.desc.FullName()...) + b = append(b, ')') + default: + b = append(b, ""...) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protorange/range.go b/vendor/google.golang.org/protobuf/reflect/protorange/range.go new file mode 100644 index 00000000..6f4c58bf --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protorange/range.go @@ -0,0 +1,316 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protorange provides functionality to traverse a message value. +package protorange + +import ( + "bytes" + "errors" + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protopath" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +var ( + // Break breaks traversal of children in the current value. + // It has no effect when traversing values that are not composite types + // (e.g., messages, lists, and maps). 
+ Break = errors.New("break traversal of children in current value") + + // Terminate terminates the entire range operation. + // All necessary Pop operations continue to be called. + Terminate = errors.New("terminate range operation") +) + +// Range performs a depth-first traversal over reachable values in a message. +// +// See Options.Range for details. +func Range(m protoreflect.Message, f func(protopath.Values) error) error { + return Options{}.Range(m, f, nil) +} + +// Options configures traversal of a message value tree. +type Options struct { + // Stable specifies whether to visit message fields and map entries + // in a stable ordering. If false, then the ordering is undefined and + // may be non-deterministic. + // + // Message fields are visited in ascending order by field number. + // Map entries are visited in ascending order, where + // boolean keys are ordered such that false sorts before true, + // numeric keys are ordered based on the numeric value, and + // string keys are lexicographically ordered by Unicode codepoints. + Stable bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + // To prevent expansion of Any messages, pass an empty protoregistry.Types: + // + // Options{Resolver: (*protoregistry.Types)(nil)} + // + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Range performs a depth-first traversal over reachable values in a message. +// The first push and the last pop are to push/pop a protopath.Root step. +// If push or pop return any non-nil error (other than Break or Terminate), +// it terminates the traversal and is returned by Range. +// +// The rules for traversing a message is as follows: +// +// • For messages, iterate over every populated known and extension field. 
+// Each field is preceded by a push of a protopath.FieldAccess step, +// followed by recursive application of the rules on the field value, +// and succeeded by a pop of that step. +// If the message has unknown fields, then push an protopath.UnknownAccess step +// followed immediately by pop of that step. +// +// • As an exception to the above rule, if the current message is a +// google.protobuf.Any message, expand the underlying message (if resolvable). +// The expanded message is preceded by a push of a protopath.AnyExpand step, +// followed by recursive application of the rules on the underlying message, +// and succeeded by a pop of that step. Mutations to the expanded message +// are written back to the Any message when popping back out. +// +// • For lists, iterate over every element. Each element is preceded by a push +// of a protopath.ListIndex step, followed by recursive application of the rules +// on the list element, and succeeded by a pop of that step. +// +// • For maps, iterate over every entry. Each entry is preceded by a push +// of a protopath.MapIndex step, followed by recursive application of the rules +// on the map entry value, and succeeded by a pop of that step. +// +// Mutations should only be made to the last value, otherwise the effects on +// traversal will be undefined. If the mutation is made to the last value +// during to a push, then the effects of the mutation will affect traversal. +// For example, if the last value is currently a message, and the push function +// populates a few fields in that message, then the newly modified fields +// will be traversed. +// +// The protopath.Values provided to push functions is only valid until the +// corresponding pop call and the values provided to a pop call is only valid +// for the duration of the pop call itself. 
+func (o Options) Range(m protoreflect.Message, push, pop func(protopath.Values) error) error { + var err error + p := new(protopath.Values) + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + pushStep(p, protopath.Root(m.Descriptor()), protoreflect.ValueOfMessage(m)) + if push != nil { + err = amendError(err, push(*p)) + } + if err == nil { + err = o.rangeMessage(p, m, push, pop) + } + if pop != nil { + err = amendError(err, pop(*p)) + } + popStep(p) + + if err == Break || err == Terminate { + err = nil + } + return err +} + +func (o Options) rangeMessage(p *protopath.Values, m protoreflect.Message, push, pop func(protopath.Values) error) (err error) { + if ok, err := o.rangeAnyMessage(p, m, push, pop); ok { + return err + } + + fieldOrder := order.AnyFieldOrder + if o.Stable { + fieldOrder = order.NumberFieldOrder + } + order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + pushStep(p, protopath.FieldAccess(fd), v) + if push != nil { + err = amendError(err, push(*p)) + } + if err == nil { + switch { + case fd.IsMap(): + err = o.rangeMap(p, fd, v.Map(), push, pop) + case fd.IsList(): + err = o.rangeList(p, fd, v.List(), push, pop) + case fd.Message() != nil: + err = o.rangeMessage(p, v.Message(), push, pop) + } + } + if pop != nil { + err = amendError(err, pop(*p)) + } + popStep(p) + return err == nil + }) + + if b := m.GetUnknown(); len(b) > 0 && err == nil { + pushStep(p, protopath.UnknownAccess(), protoreflect.ValueOfBytes(b)) + if push != nil { + err = amendError(err, push(*p)) + } + if pop != nil { + err = amendError(err, pop(*p)) + } + popStep(p) + } + + if err == Break { + err = nil + } + return err +} + +func (o Options) rangeAnyMessage(p *protopath.Values, m protoreflect.Message, push, pop func(protopath.Values) error) (ok bool, err error) { + md := m.Descriptor() + if md.FullName() != "google.protobuf.Any" { + return false, nil + } + + fds := md.Fields() + url := 
m.Get(fds.ByNumber(genid.Any_TypeUrl_field_number)).String() + val := m.Get(fds.ByNumber(genid.Any_Value_field_number)).Bytes() + mt, errFind := o.Resolver.FindMessageByURL(url) + if errFind != nil { + return false, nil + } + + // Unmarshal the raw encoded message value into a structured message value. + m2 := mt.New() + errUnmarshal := proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + Resolver: o.Resolver, + }.Unmarshal(val, m2.Interface()) + if errUnmarshal != nil { + // If the underlying message cannot be unmarshaled, + // then just treat this as a normal message type. + return false, nil + } + + // Marshal Any before ranging to detect possible mutations. + b1, errMarshal := proto.MarshalOptions{ + AllowPartial: true, + Deterministic: true, + }.Marshal(m2.Interface()) + if errMarshal != nil { + return true, errMarshal + } + + pushStep(p, protopath.AnyExpand(m2.Descriptor()), protoreflect.ValueOfMessage(m2)) + if push != nil { + err = amendError(err, push(*p)) + } + if err == nil { + err = o.rangeMessage(p, m2, push, pop) + } + if pop != nil { + err = amendError(err, pop(*p)) + } + popStep(p) + + // Marshal Any after ranging to detect possible mutations. + b2, errMarshal := proto.MarshalOptions{ + AllowPartial: true, + Deterministic: true, + }.Marshal(m2.Interface()) + if errMarshal != nil { + return true, errMarshal + } + + // Mutations detected, write the new sequence of bytes to the Any message. 
+ if !bytes.Equal(b1, b2) { + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(b2)) + } + + if err == Break { + err = nil + } + return true, err +} + +func (o Options) rangeList(p *protopath.Values, fd protoreflect.FieldDescriptor, ls protoreflect.List, push, pop func(protopath.Values) error) (err error) { + for i := 0; i < ls.Len() && err == nil; i++ { + v := ls.Get(i) + pushStep(p, protopath.ListIndex(i), v) + if push != nil { + err = amendError(err, push(*p)) + } + if err == nil && fd.Message() != nil { + err = o.rangeMessage(p, v.Message(), push, pop) + } + if pop != nil { + err = amendError(err, pop(*p)) + } + popStep(p) + } + + if err == Break { + err = nil + } + return err +} + +func (o Options) rangeMap(p *protopath.Values, fd protoreflect.FieldDescriptor, ms protoreflect.Map, push, pop func(protopath.Values) error) (err error) { + keyOrder := order.AnyKeyOrder + if o.Stable { + keyOrder = order.GenericKeyOrder + } + order.RangeEntries(ms, keyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + pushStep(p, protopath.MapIndex(k), v) + if push != nil { + err = amendError(err, push(*p)) + } + if err == nil && fd.MapValue().Message() != nil { + err = o.rangeMessage(p, v.Message(), push, pop) + } + if pop != nil { + err = amendError(err, pop(*p)) + } + popStep(p) + return err == nil + }) + + if err == Break { + err = nil + } + return err +} + +func pushStep(p *protopath.Values, s protopath.Step, v protoreflect.Value) { + p.Path = append(p.Path, s) + p.Values = append(p.Values, v) +} + +func popStep(p *protopath.Values) { + p.Path = p.Path[:len(p.Path)-1] + p.Values = p.Values[:len(p.Values)-1] +} + +// amendError amends the previous error with the current error if it is +// considered more serious. 
The precedence order for errors is: +// +// nil < Break < Terminate < previous non-nil < current non-nil +func amendError(prev, curr error) error { + switch { + case curr == nil: + return prev + case curr == Break && prev != nil: + return prev + case curr == Terminate && prev != nil && prev != Break: + return prev + default: + return curr + } +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index b03c1223..54ce326d 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) + case 13: + b = p.appendSingularField(b, "edition", nil) } return b } @@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 7: b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "allow_alias", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "jstype", nil) case 5: b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) case 3: b = 
p.appendSingularField(b, "deprecated", nil) case 10: b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 18: + b = p.appendSingularField(b, "target", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index f3198107..37601b78 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -148,7 +148,7 @@ type Message interface { // be preserved in marshaling or other operations. IsValid() bool - // ProtoMethods returns optional fast-path implementions of various operations. + // ProtoMethods returns optional fast-path implementations of various operations. // This method may return nil. // // The returned methods type is identical to diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 00000000..59165254 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. +// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. 
+// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since Value does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - Message values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. 
+func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. +func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index ca8e28c5..08e5ef73 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -54,11 +54,11 @@ import ( // // Append a 0 to a "repeated int32" field. // // Since the Value returned by Mutable is guaranteed to alias // // the source message, modifying the Value modifies the message. -// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) // // // Assign [0] to a "repeated int32" field by creating a new Value, // // modifying it, and assigning it. -// list := message.NewField(fieldDesc).(List) +// list := message.NewField(fieldDesc).List() // list.Append(protoreflect.ValueOfInt32(0)) // message.Set(fieldDesc, list) // // ERROR: Since it is not defined whether Set aliases the source, diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 58352a69..aeb55977 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" // It is a variable so that the behavior is easily overridden in another file. 
var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" policy := conflictPolicy if v := os.Getenv(env); v != "" { policy = v diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index abe4ab51..dac5671d 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -406,6 +406,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} } +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). +type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. 
+var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). 
+type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. +var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() 
protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. @@ -442,11 +588,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -468,6 +614,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 + +const ( + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. 
+ GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 +) + +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. +var ( + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", + } + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, + } +) + +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) + *p = x + return p +} + +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() +} + +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[8] +} + +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GeneratedCodeInfo_Annotation_Semantic(num) + return nil +} + +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -544,8 +754,12 @@ type FileDescriptorProto struct { // development tools. 
SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. - // The supported values are "proto2" and "proto3". + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file, which is an opaque string. + Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -664,6 +878,13 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } +func (x *FileDescriptorProto) GetEdition() string { + if x != nil && x.Edition != nil { + return *x.Edition + } + return "" +} + // Describes a message type. type DescriptorProto struct { state protoimpl.MessageState @@ -860,7 +1081,6 @@ type FieldDescriptorProto struct { // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. @@ -1382,22 +1602,22 @@ type FileOptions struct { // inappropriate because proto packages do not normally start with backwards // domain names. JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). + // Controls the name of the wrapper Java class generated for the .proto file. + // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java + // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -1531,7 +1751,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { if x != nil && x.JavaGenerateEqualsAndHash != nil { return *x.JavaGenerateEqualsAndHash @@ -1670,10 +1890,12 @@ type MessageOptions struct { // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // // Note that the message cannot have any defined fields; MessageSets only // have extensions. // @@ -1692,28 +1914,44 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. 
// // For maps fields: - // map map_field = 1; + // + // map map_field = 1; + // // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1785,6 +2023,14 @@ func (x *MessageOptions) GetMapEntry() bool { return false } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1838,7 +2084,6 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. @@ -1849,7 +2094,14 @@ type FieldOptions struct { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. + UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` // Is this field deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this @@ -1857,17 +2109,24 @@ type FieldOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for FieldOptions fields. 
const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) ) func (x *FieldOptions) Reset() { @@ -1930,6 +2189,13 @@ func (x *FieldOptions) GetLazy() bool { return Default_FieldOptions_Lazy } +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + func (x *FieldOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1944,6 +2210,27 @@ func (x *FieldOptions) GetWeak() bool { return Default_FieldOptions_Weak } +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { + if x != nil && x.Target != nil { + return *x.Target + } + return FieldOptions_TARGET_TYPE_UNKNOWN +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2014,6 +2301,15 @@ type EnumOptions struct { // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. 
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2069,6 +2365,14 @@ func (x *EnumOptions) GetDeprecated() bool { return Default_EnumOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2399,43 +2703,48 @@ type SourceCodeInfo struct { // tools. // // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } + // + // message Foo { + // optional string foo = 1; + // } + // // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi + // + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. 
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. + // - A location may refer to a repeated field itself (i.e. 
not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` } @@ -2715,8 +3024,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". 
type UninterpretedOption_NamePart struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2781,23 +3090,34 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; + // + // repeated DescriptorProto message_type = 4; + // // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; + // + // repeated FieldDescriptorProto field = 2; + // // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; + // + // optional string name = 1; // // Thus, the above path gives the location of a field name. If we removed // the last element: - // [ 4, 3, 2, 7 ] + // + // [ 4, 3, 2, 7 ] + // // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -2826,34 +3146,34 @@ type SourceCodeInfo_Location struct { // // Examples: // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. 
// - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. // - // // Detached comment for corge paragraph 2. + // // Detached comment for corge paragraph 2. // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; // - // // ignored detached comments. + // // ignored detached comments. LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` @@ -2940,9 +3260,10 @@ type GeneratedCodeInfo_Annotation struct { // that relates to the identified object. Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past + // relates to the identified object. 
The end offset should be one past // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` } func (x *GeneratedCodeInfo_Annotation) Reset() { @@ -3005,6 +3326,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { return 0 } +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor var file_google_protobuf_descriptor_proto_rawDesc = []byte{ @@ -3016,7 +3344,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3054,330 +3382,391 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 
0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 
0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 
0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 
0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 
0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 
0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 
0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 
0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 
0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 
0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, + 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, + 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 
0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, + 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, + 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 
0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, + 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, + 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, + 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, + 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, + 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, + 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, + 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, + 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, + 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, + 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 
0x44, 0x10, + 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, + 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, + 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, + 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 
0x64, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, + 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, + 
0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, + 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, + 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, + 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, + 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 
0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, + 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, + 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 
0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 
0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, + 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, + 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, + 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 
0x12, + 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, + 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, + 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, + 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, + 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a, + 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, + 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, + 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, + 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 
0x25, 0x0a, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, + 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, + 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, + 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 
0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, + 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 
0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 
0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 
0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, @@ -3385,97 +3774,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, + 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 
0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, + 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, + 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 
0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 
0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 
0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 
0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 
0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 
0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 
0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, @@ -3498,7 +3885,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type @@ -3506,84 +3893,90 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: 
google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation + (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel + 
(GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 19: google.protobuf.FileOptions + (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 34: 
google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto 
+ 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> 
google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: 
google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // 
[0:43] is the sub-list for field type_name + 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 46, // [46:46] is the sub-list for method output_type + 46, // [46:46] is the sub-list for method input_type + 46, // [46:46] is the sub-list for extension type_name + 46, // [46:46] is the sub-list for extension extendee + 0, // [0:46] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -3940,7 +4333,7 
@@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, + NumEnums: 9, NumMessages: 27, NumExtensions: 0, NumServices: 0, diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go new file mode 100644 index 00000000..f77ef0de --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -0,0 +1,717 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dynamicpb creates protocol buffer messages using runtime type information. +package dynamicpb + +import ( + "math" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// enum is a dynamic protoreflect.Enum. +type enum struct { + num protoreflect.EnumNumber + typ protoreflect.EnumType +} + +func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() } +func (e enum) Type() protoreflect.EnumType { return e.typ } +func (e enum) Number() protoreflect.EnumNumber { return e.num } + +// enumType is a dynamic protoreflect.EnumType. +type enumType struct { + desc protoreflect.EnumDescriptor +} + +// NewEnumType creates a new EnumType with the provided descriptor. +// +// EnumTypes created by this package are equal if their descriptors are equal. +// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2). +// +// Enum values created by the EnumType are equal if their numbers are equal. 
+func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType { + return enumType{desc} +} + +func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} } +func (et enumType) Descriptor() protoreflect.EnumDescriptor { return et.desc } + +// extensionType is a dynamic protoreflect.ExtensionType. +type extensionType struct { + desc extensionTypeDescriptor +} + +// A Message is a dynamically constructed protocol buffer message. +// +// Message implements the proto.Message interface, and may be used with all +// standard proto package functions such as Marshal, Unmarshal, and so forth. +// +// Message also implements the protoreflect.Message interface. See the protoreflect +// package documentation for that interface for how to get and set fields and +// otherwise interact with the contents of a Message. +// +// Reflection API functions which construct messages, such as NewField, +// return new dynamic messages of the appropriate type. Functions which take +// messages, such as Set for a message-value field, will accept any message +// with a compatible type. +// +// Operations which modify a Message are not safe for concurrent use. +type Message struct { + typ messageType + known map[protoreflect.FieldNumber]protoreflect.Value + ext map[protoreflect.FieldNumber]protoreflect.FieldDescriptor + unknown protoreflect.RawFields +} + +var ( + _ protoreflect.Message = (*Message)(nil) + _ protoreflect.ProtoMessage = (*Message)(nil) + _ protoiface.MessageV1 = (*Message)(nil) +) + +// NewMessage creates a new message with the provided descriptor. +func NewMessage(desc protoreflect.MessageDescriptor) *Message { + return &Message{ + typ: messageType{desc}, + known: make(map[protoreflect.FieldNumber]protoreflect.Value), + ext: make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor), + } +} + +// ProtoMessage implements the legacy message interface. 
+func (m *Message) ProtoMessage() {} + +// ProtoReflect implements the protoreflect.ProtoMessage interface. +func (m *Message) ProtoReflect() protoreflect.Message { + return m +} + +// String returns a string representation of a message. +func (m *Message) String() string { + return protoimpl.X.MessageStringOf(m) +} + +// Reset clears the message to be empty, but preserves the dynamic message type. +func (m *Message) Reset() { + m.known = make(map[protoreflect.FieldNumber]protoreflect.Value) + m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor) + m.unknown = nil +} + +// Descriptor returns the message descriptor. +func (m *Message) Descriptor() protoreflect.MessageDescriptor { + return m.typ.desc +} + +// Type returns the message type. +func (m *Message) Type() protoreflect.MessageType { + return m.typ +} + +// New returns a newly allocated empty message with the same descriptor. +// See protoreflect.Message for details. +func (m *Message) New() protoreflect.Message { + return m.Type().New() +} + +// Interface returns the message. +// See protoreflect.Message for details. +func (m *Message) Interface() protoreflect.ProtoMessage { + return m +} + +// ProtoMethods is an internal detail of the protoreflect.Message interface. +// Users should never call this directly. +func (m *Message) ProtoMethods() *protoiface.Methods { + return nil +} + +// Range visits every populated field in undefined order. +// See protoreflect.Message for details. +func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + for num, v := range m.known { + fd := m.ext[num] + if fd == nil { + fd = m.Descriptor().Fields().ByNumber(num) + } + if !isSet(fd, v) { + continue + } + if !f(fd, v) { + return + } + } +} + +// Has reports whether a field is populated. +// See protoreflect.Message for details. 
+func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { + m.checkField(fd) + if fd.IsExtension() && m.ext[fd.Number()] != fd { + return false + } + v, ok := m.known[fd.Number()] + if !ok { + return false + } + return isSet(fd, v) +} + +// Clear clears a field. +// See protoreflect.Message for details. +func (m *Message) Clear(fd protoreflect.FieldDescriptor) { + m.checkField(fd) + num := fd.Number() + delete(m.known, num) + delete(m.ext, num) +} + +// Get returns the value of a field. +// See protoreflect.Message for details. +func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + switch { + case fd.IsMap(): + if v.Map().Len() > 0 { + return v + } + case fd.IsList(): + if v.List().Len() > 0 { + return v + } + default: + return v + } + } + switch { + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: fd}) + case fd.IsList(): + return protoreflect.ValueOfList(emptyList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}}) + case fd.Kind() == protoreflect.BytesKind: + return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...)) + default: + return fd.Default() + } +} + +// Mutable returns a mutable reference to a repeated, map, or message field. +// See protoreflect.Message for details. 
+func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + if !fd.IsMap() && !fd.IsList() && fd.Message() == nil { + panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName())) + } + if m.known == nil { + panic(errors.New("%v: modification of read-only message", fd.FullName())) + } + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + m.ext[num] = fd + m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + return v + } + m.clearOtherOneofFields(fd) + m.known[num] = m.NewField(fd) + if fd.IsExtension() { + m.ext[num] = fd + } + return m.known[num] +} + +// Set stores a value in a field. +// See protoreflect.Message for details. +func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.checkField(fd) + if m.known == nil { + panic(errors.New("%v: modification of read-only message", fd.FullName())) + } + if fd.IsExtension() { + isValid := true + switch { + case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v): + isValid = false + case fd.IsList(): + isValid = v.List().IsValid() + case fd.IsMap(): + isValid = v.Map().IsValid() + case fd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())) + } + m.ext[fd.Number()] = fd + } else { + typecheck(fd, v) + } + m.clearOtherOneofFields(fd) + m.known[fd.Number()] = v +} + +func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) { + od := fd.ContainingOneof() + if od == nil { + return + } + num := fd.Number() + for i := 0; i < od.Fields().Len(); i++ { + if n := od.Fields().Get(i).Number(); n != num { + delete(m.known, n) + } + } +} + +// NewField returns a new value for assignable to the field of a given descriptor. +// See protoreflect.Message for details. 
+func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + switch { + case fd.IsExtension(): + return fd.(protoreflect.ExtensionTypeDescriptor).Type().New() + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: fd, + mapv: make(map[interface{}]protoreflect.Value), + }) + case fd.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + default: + return fd.Default() + } +} + +// WhichOneof reports which field in a oneof is populated, returning nil if none are populated. +// See protoreflect.Message for details. +func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + for i := 0; i < od.Fields().Len(); i++ { + fd := od.Fields().Get(i) + if m.Has(fd) { + return fd + } + } + return nil +} + +// GetUnknown returns the raw unknown fields. +// See protoreflect.Message for details. +func (m *Message) GetUnknown() protoreflect.RawFields { + return m.unknown +} + +// SetUnknown sets the raw unknown fields. +// See protoreflect.Message for details. +func (m *Message) SetUnknown(r protoreflect.RawFields) { + if m.known == nil { + panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName())) + } + m.unknown = r +} + +// IsValid reports whether the message is valid. +// See protoreflect.Message for details. 
+func (m *Message) IsValid() bool { + return m.known != nil +} + +func (m *Message) checkField(fd protoreflect.FieldDescriptor) { + if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() { + if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok { + panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName())) + } + return + } + if fd.Parent() == m.Descriptor() { + return + } + fields := m.Descriptor().Fields() + index := fd.Index() + if index >= fields.Len() || fields.Get(index) != fd { + panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName())) + } +} + +type messageType struct { + desc protoreflect.MessageDescriptor +} + +// NewMessageType creates a new MessageType with the provided descriptor. +// +// MessageTypes created by this package are equal if their descriptors are equal. +// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2). +func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType { + return messageType{desc} +} + +func (mt messageType) New() protoreflect.Message { return NewMessage(mt.desc) } +func (mt messageType) Zero() protoreflect.Message { return &Message{typ: messageType{mt.desc}} } +func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc } +func (mt messageType) Enum(i int) protoreflect.EnumType { + if ed := mt.desc.Fields().Get(i).Enum(); ed != nil { + return NewEnumType(ed) + } + return nil +} +func (mt messageType) Message(i int) protoreflect.MessageType { + if md := mt.desc.Fields().Get(i).Message(); md != nil { + return NewMessageType(md) + } + return nil +} + +type emptyList struct { + desc protoreflect.FieldDescriptor +} + +func (x emptyList) Len() int { return 0 } +func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) } +func (x emptyList) Set(n int, v protoreflect.Value) { + panic(errors.New("modification of immutable 
list")) +} +func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) } +func (x emptyList) AppendMutable() protoreflect.Value { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) } +func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) } +func (x emptyList) IsValid() bool { return false } + +type dynamicList struct { + desc protoreflect.FieldDescriptor + list []protoreflect.Value +} + +func (x *dynamicList) Len() int { + return len(x.list) +} + +func (x *dynamicList) Get(n int) protoreflect.Value { + return x.list[n] +} + +func (x *dynamicList) Set(n int, v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list[n] = v +} + +func (x *dynamicList) Append(v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list = append(x.list, v) +} + +func (x *dynamicList) AppendMutable() protoreflect.Value { + if x.desc.Message() == nil { + panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName())) + } + v := x.NewElement() + x.Append(v) + return v +} + +func (x *dynamicList) Truncate(n int) { + // Zero truncated elements to avoid keeping data live. 
+ for i := n; i < len(x.list); i++ { + x.list[i] = protoreflect.Value{} + } + x.list = x.list[:n] +} + +func (x *dynamicList) NewElement() protoreflect.Value { + return newListEntry(x.desc) +} + +func (x *dynamicList) IsValid() bool { + return true +} + +type dynamicMap struct { + desc protoreflect.FieldDescriptor + mapv map[interface{}]protoreflect.Value +} + +func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } +func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) { + typecheckSingular(x.desc.MapKey(), k.Value()) + typecheckSingular(x.desc.MapValue(), v) + x.mapv[k.Interface()] = v +} +func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() } +func (x *dynamicMap) Clear(k protoreflect.MapKey) { delete(x.mapv, k.Interface()) } +func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value { + if x.desc.MapValue().Message() == nil { + panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName())) + } + v := x.Get(k) + if !v.IsValid() { + v = x.NewValue() + x.Set(k, v) + } + return v +} +func (x *dynamicMap) Len() int { return len(x.mapv) } +func (x *dynamicMap) NewValue() protoreflect.Value { + if md := x.desc.MapValue().Message(); md != nil { + return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect()) + } + return x.desc.MapValue().Default() +} +func (x *dynamicMap) IsValid() bool { + return x.mapv != nil +} + +func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { + for k, v := range x.mapv { + if !f(protoreflect.ValueOf(k).MapKey(), v) { + return + } + } +} + +func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsMap(): + return v.Map().Len() > 0 + case fd.IsList(): + return v.List().Len() > 0 + case fd.ContainingOneof() != nil: + return true + case fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension(): + switch fd.Kind() { + case protoreflect.BoolKind: + 
return v.Bool() + case protoreflect.EnumKind: + return v.Enum() != 0 + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + return v.Int() != 0 + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + return v.Uint() != 0 + case protoreflect.FloatKind, protoreflect.DoubleKind: + return v.Float() != 0 || math.Signbit(v.Float()) + case protoreflect.StringKind: + return v.String() != "" + case protoreflect.BytesKind: + return len(v.Bytes()) > 0 + } + } + return true +} + +func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + if err := typeIsValid(fd, v); err != nil { + panic(err) + } +} + +func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + switch { + case !v.IsValid(): + return errors.New("%v: assigning invalid value", fd.FullName()) + case fd.IsMap(): + if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil + case fd.IsList(): + switch list := v.Interface().(type) { + case *dynamicList: + if list.desc == fd && list.IsValid() { + return nil + } + case emptyList: + if list.desc == fd && list.IsValid() { + return nil + } + } + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + default: + return singularTypeIsValid(fd, v) + } +} + +func typecheckSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + if err := singularTypeIsValid(fd, v); err != nil { + panic(err) + } +} + +func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + vi := v.Interface() + var ok bool + switch fd.Kind() { + case protoreflect.BoolKind: + _, ok = vi.(bool) + case protoreflect.EnumKind: + // We could check against the valid set of enum values, but do not. 
+ _, ok = vi.(protoreflect.EnumNumber) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + _, ok = vi.(int32) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + _, ok = vi.(uint32) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + _, ok = vi.(int64) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + _, ok = vi.(uint64) + case protoreflect.FloatKind: + _, ok = vi.(float32) + case protoreflect.DoubleKind: + _, ok = vi.(float64) + case protoreflect.StringKind: + _, ok = vi.(string) + case protoreflect.BytesKind: + _, ok = vi.([]byte) + case protoreflect.MessageKind, protoreflect.GroupKind: + var m protoreflect.Message + m, ok = vi.(protoreflect.Message) + if ok && m.Descriptor().FullName() != fd.Message().FullName() { + return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName()) + } + if dm, ok := vi.(*Message); ok && dm.known == nil { + return errors.New("%v: assigning invalid zero-value message", fd.FullName()) + } + } + if !ok { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil +} + +func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.Kind() { + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.EnumKind: + return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return 
protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.MessageKind, protoreflect.GroupKind: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + } + panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind())) +} + +// NewExtensionType creates a new ExtensionType with the provided descriptor. +// +// Dynamic ExtensionTypes with the same descriptor compare as equal. That is, +// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2). +// +// The InterfaceOf and ValueOf methods of the extension type are defined as: +// +// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { +// return protoreflect.ValueOf(iv) +// } +// +// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { +// return v.Interface() +// } +// +// The Go type used by the proto.GetExtension and proto.SetExtension functions +// is determined by these methods, and is therefore equivalent to the Go type +// used to represent a protoreflect.Value. See the protoreflect.Value +// documentation for more details. 
+func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType { + if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok { + desc = xt.Descriptor() + } + return extensionType{extensionTypeDescriptor{desc}} +} + +func (xt extensionType) New() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: xt.desc, + mapv: make(map[interface{}]protoreflect.Value), + }) + case xt.desc.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message())) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) Zero() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc}) + case xt.desc.Cardinality() == protoreflect.Repeated: + return protoreflect.ValueOfList(emptyList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}}) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return xt.desc +} + +func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value { + v := protoreflect.ValueOf(iv) + typecheck(xt.desc, v) + return v +} + +func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} { + typecheck(xt.desc, v) + return v.Interface() +} + +func (xt extensionType) IsValidInterface(iv interface{}) bool { + return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil +} + +func (xt extensionType) IsValidValue(v protoreflect.Value) bool { + return typeIsValid(xt.desc, v) == nil +} + +type extensionTypeDescriptor struct { + protoreflect.ExtensionDescriptor +} + +func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType { + return extensionType{xt} +} + +func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + 
return xt.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 8c10797b..a6c7a33f 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -37,8 +37,7 @@ // It is functionally a tuple of the full name of the remote message type and // the serialized bytes of the remote message value. // -// -// Constructing an Any +// # Constructing an Any // // An Any message containing another message value is constructed using New: // @@ -48,8 +47,7 @@ // } // ... // make use of any // -// -// Unmarshaling an Any +// # Unmarshaling an Any // // With a populated Any message, the underlying message can be serialized into // a remote concrete message value in a few ways. @@ -95,8 +93,7 @@ // listed in the case clauses are linked into the Go binary and therefore also // registered in the global registry. // -// -// Type checking an Any +// # Type checking an Any // // In order to type check whether an Any message represents some other message, // then use the MessageIs method: @@ -115,7 +112,6 @@ // } // ... // make use of m // } -// package anypb import ( @@ -136,45 +132,49 @@ import ( // // Example 1: Pack and unpack a message in C++. // -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... 
-// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... +// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,35 +182,33 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // +// # JSON // -// JSON -// ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. 
Example: // -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } // -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } type Any struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -228,14 +226,14 @@ type Any struct { // scheme `http`, `https`, or no scheme, one can optionally set up a type // server that maps type URLs to message definitions as follows: // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) + // - If no scheme is provided, `https` is assumed. + // - An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // - Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. 
Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with @@ -243,7 +241,6 @@ type Any struct { // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. - // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index a583ca2f..df709a8d 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -35,8 +35,7 @@ // // The Duration message represents a signed span of time. // -// -// Conversion to a Go Duration +// # Conversion to a Go Duration // // The AsDuration method can be used to convert a Duration message to a // standard Go time.Duration value: @@ -65,15 +64,13 @@ // the resulting value to the closest representable value (e.g., math.MaxInt64 // for positive overflow and math.MinInt64 for negative overflow). // -// -// Conversion from a Go Duration +// # Conversion from a Go Duration // // The durationpb.New function can be used to construct a Duration message // from a standard Go time.Duration value: // // dur := durationpb.New(d) // ... // make use of d as a *durationpb.Duration -// package durationpb import ( @@ -96,43 +93,43 @@ import ( // // Example 1: Compute Duration from two Timestamps in pseudo code. 
// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; // -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; // -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; // -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; // -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } // // Example 3: Compute Duration from datetime.timedelta in Python. 
// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) // // # JSON Mapping // @@ -143,8 +140,6 @@ import ( // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". -// -// type Duration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c9ae9213..61f69fc1 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -36,8 +36,7 @@ // The Timestamp message represents a timestamp, // an instant in time since the Unix epoch (January 1st, 1970). // -// -// Conversion to a Go Time +// # Conversion to a Go Time // // The AsTime method can be used to convert a Timestamp message to a // standard Go time.Time value in UTC: @@ -59,8 +58,7 @@ // ... // handle error // } // -// -// Conversion from a Go Time +// # Conversion from a Go Time // // The timestamppb.New function can be used to construct a Timestamp message // from a standard Go time.Time value: @@ -72,7 +70,6 @@ // // ts := timestamppb.Now() // ... // make use of ts as a *timestamppb.Timestamp -// package timestamppb import ( @@ -101,52 +98,50 @@ import ( // // Example 1: Compute Timestamp from POSIX `time()`. // -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
// -// struct timeval tv; -// gettimeofday(&tv, NULL); +// struct timeval tv; +// gettimeofday(&tv, NULL); // -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// long millis = System.currentTimeMillis(); // +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); // // Example 5: Compute Timestamp from Java `Instant.now()`. 
// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); +// Instant now = Instant.now(); // +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); // // Example 6: Compute Timestamp from current time in Python. // -// timestamp = Timestamp() -// timestamp.GetCurrentTime() +// timestamp = Timestamp() +// timestamp.GetCurrentTime() // // # JSON Mapping // @@ -174,8 +169,6 @@ import ( // the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. -// -// type Timestamp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 00000000..d0bb96a9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,656 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. 
+type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 +) + +// Enum value maps for CodeGeneratorResponse_Feature. +var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. 
+type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. 
+type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. 
The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. 
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. 
The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // + // @@protoc_insertion_point(NAME) + // + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // + // // @@protoc_insertion_point(namespace_scope) + // + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. 
+ InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + // Information describing the file content being inserted. If an insertion + // point is used, this information will be appropriately offset and inserted + // into the code generation metadata for the generated files. + GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. 
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo { + if x != nil { + return x.GeneratedCodeInfo + } + return nil +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 
0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 
0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, + 0x45, 0x5f, 
0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: 
google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto + (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 3b5d4b45..528d2af8 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -27,7 +27,7 @@ github.com/btcsuite/btcutil/base58 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/containerd/cgroups v1.0.4 +# github.com/containerd/cgroups v1.1.0 ## explicit; go 1.17 github.com/containerd/cgroups github.com/containerd/cgroups/stats/v1 @@ -92,7 +92,7 @@ github.com/go-oauth2/oauth2/v4/store # github.com/go-session/session v3.1.2+incompatible ## explicit github.com/go-session/session -# github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 +# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 ## explicit; go 1.13 github.com/go-task/slim-sprig # 
github.com/godbus/dbus/v5 v5.1.0 @@ -113,7 +113,7 @@ github.com/golang/glog ## explicit; go 1.11 github.com/golang/mock/mockgen github.com/golang/mock/mockgen/model -# github.com/golang/protobuf v1.5.2 +# github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto @@ -125,8 +125,8 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/google/gopacket v1.1.19 ## explicit; go 1.12 github.com/google/gopacket/routing -# github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b -## explicit; go 1.18 +# github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b +## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/uuid v1.3.0 ## explicit @@ -167,7 +167,7 @@ github.com/heetch/confita/backend github.com/heetch/confita/backend/env github.com/heetch/confita/backend/file github.com/heetch/confita/backend/flags -# github.com/huin/goupnp v1.0.3 +# github.com/huin/goupnp v1.1.0 ## explicit; go 1.14 github.com/huin/goupnp github.com/huin/goupnp/dcps/internetgateway1 @@ -176,8 +176,8 @@ github.com/huin/goupnp/httpu github.com/huin/goupnp/scpd github.com/huin/goupnp/soap github.com/huin/goupnp/ssdp -# github.com/ipfs/go-cid v0.3.2 -## explicit; go 1.18 +# github.com/ipfs/go-cid v0.4.1 +## explicit; go 1.19 github.com/ipfs/go-cid # github.com/ipfs/go-datastore v0.6.0 ## explicit; go 1.17 @@ -234,21 +234,24 @@ github.com/jbenet/goprocess/context github.com/karlseguin/ccache # github.com/karlseguin/expect v1.0.8 ## explicit; go 1.14 -# github.com/klauspost/compress v1.15.14 -## explicit; go 1.17 +# github.com/klauspost/compress v1.16.4 +## explicit; go 1.18 github.com/klauspost/compress +github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# 
github.com/klauspost/cpuid/v2 v2.2.3 +# github.com/klauspost/cpuid/v2 v2.2.4 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 -# github.com/koron/go-ssdp v0.0.3 -## explicit; go 1.17 +# github.com/koron/go-ssdp v0.0.4 +## explicit; go 1.19 github.com/koron/go-ssdp +github.com/koron/go-ssdp/internal/multicast +github.com/koron/go-ssdp/internal/ssdplog # github.com/libp2p/go-buffer-pool v0.1.0 ## explicit; go 1.17 github.com/libp2p/go-buffer-pool @@ -259,8 +262,8 @@ github.com/libp2p/go-cidranger/net # github.com/libp2p/go-flow-metrics v0.1.0 ## explicit; go 1.17 github.com/libp2p/go-flow-metrics -# github.com/libp2p/go-libp2p v0.24.2 -## explicit; go 1.18 +# github.com/libp2p/go-libp2p v0.27.8 +## explicit; go 1.19 github.com/libp2p/go-libp2p github.com/libp2p/go-libp2p/config github.com/libp2p/go-libp2p/core/canonicallog @@ -271,8 +274,6 @@ github.com/libp2p/go-libp2p/core/crypto/pb github.com/libp2p/go-libp2p/core/event github.com/libp2p/go-libp2p/core/host github.com/libp2p/go-libp2p/core/internal/catch -github.com/libp2p/go-libp2p/core/introspection -github.com/libp2p/go-libp2p/core/introspection/pb github.com/libp2p/go-libp2p/core/metrics github.com/libp2p/go-libp2p/core/network github.com/libp2p/go-libp2p/core/peer @@ -299,6 +300,7 @@ github.com/libp2p/go-libp2p/p2p/host/pstoremanager github.com/libp2p/go-libp2p/p2p/host/relaysvc github.com/libp2p/go-libp2p/p2p/host/resource-manager github.com/libp2p/go-libp2p/p2p/host/routed +github.com/libp2p/go-libp2p/p2p/metricshelper github.com/libp2p/go-libp2p/p2p/muxer/yamux github.com/libp2p/go-libp2p/p2p/net/connmgr github.com/libp2p/go-libp2p/p2p/net/nat @@ -306,8 +308,6 @@ github.com/libp2p/go-libp2p/p2p/net/pnet github.com/libp2p/go-libp2p/p2p/net/reuseport github.com/libp2p/go-libp2p/p2p/net/swarm github.com/libp2p/go-libp2p/p2p/net/upgrader -github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb -github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client 
github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto @@ -325,8 +325,9 @@ github.com/libp2p/go-libp2p/p2p/transport/quic github.com/libp2p/go-libp2p/p2p/transport/quicreuse github.com/libp2p/go-libp2p/p2p/transport/tcp github.com/libp2p/go-libp2p/p2p/transport/websocket -# github.com/libp2p/go-libp2p-asn-util v0.2.0 -## explicit; go 1.17 +github.com/libp2p/go-libp2p/p2p/transport/webtransport +# github.com/libp2p/go-libp2p-asn-util v0.3.0 +## explicit; go 1.19 github.com/libp2p/go-libp2p-asn-util # github.com/libp2p/go-libp2p-kad-dht v0.20.0 ## explicit; go 1.18 @@ -351,6 +352,7 @@ github.com/libp2p/go-libp2p-record/pb # github.com/libp2p/go-msgio v0.3.0 ## explicit; go 1.18 github.com/libp2p/go-msgio +github.com/libp2p/go-msgio/pbio github.com/libp2p/go-msgio/protoio # github.com/libp2p/go-nat v0.1.0 ## explicit; go 1.16 @@ -358,58 +360,28 @@ github.com/libp2p/go-nat # github.com/libp2p/go-netroute v0.2.1 ## explicit; go 1.18 github.com/libp2p/go-netroute -# github.com/libp2p/go-openssl v0.1.0 -## explicit; go 1.17 -github.com/libp2p/go-openssl -github.com/libp2p/go-openssl/utils # github.com/libp2p/go-reuseport v0.2.0 ## explicit; go 1.17 github.com/libp2p/go-reuseport # github.com/libp2p/go-yamux/v4 v4.0.0 ## explicit; go 1.18 github.com/libp2p/go-yamux/v4 -# github.com/lucas-clemente/quic-go v0.31.1 -## explicit; go 1.18 -github.com/lucas-clemente/quic-go -github.com/lucas-clemente/quic-go/internal/ackhandler -github.com/lucas-clemente/quic-go/internal/congestion -github.com/lucas-clemente/quic-go/internal/flowcontrol -github.com/lucas-clemente/quic-go/internal/handshake -github.com/lucas-clemente/quic-go/internal/logutils -github.com/lucas-clemente/quic-go/internal/protocol -github.com/lucas-clemente/quic-go/internal/qerr -github.com/lucas-clemente/quic-go/internal/qtls -github.com/lucas-clemente/quic-go/internal/utils -github.com/lucas-clemente/quic-go/internal/utils/linkedlist 
-github.com/lucas-clemente/quic-go/internal/wire -github.com/lucas-clemente/quic-go/logging -github.com/lucas-clemente/quic-go/qlog -github.com/lucas-clemente/quic-go/quicvarint # github.com/mailjet/mailjet-apiv3-go/v3 v3.1.1 ## explicit; go 1.13 github.com/mailjet/mailjet-apiv3-go/v3 github.com/mailjet/mailjet-apiv3-go/v3/fixtures github.com/mailjet/mailjet-apiv3-go/v3/resources -# github.com/marten-seemann/qtls-go1-18 v0.1.4 -## explicit; go 1.18 -github.com/marten-seemann/qtls-go1-18 -# github.com/marten-seemann/qtls-go1-19 v0.1.2 -## explicit; go 1.19 -github.com/marten-seemann/qtls-go1-19 # github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd ## explicit; go 1.15 github.com/marten-seemann/tcp -# github.com/mattn/go-isatty v0.0.17 +# github.com/mattn/go-isatty v0.0.18 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/mattn/go-pointer v0.0.1 -## explicit -github.com/mattn/go-pointer # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.50 -## explicit; go 1.14 +# github.com/miekg/dns v1.1.53 +## explicit; go 1.19 github.com/miekg/dns # github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b ## explicit @@ -429,7 +401,7 @@ github.com/multiformats/go-base32 # github.com/multiformats/go-base36 v0.2.0 ## explicit; go 1.18 github.com/multiformats/go-base36 -# github.com/multiformats/go-multiaddr v0.8.0 +# github.com/multiformats/go-multiaddr v0.9.0 ## explicit; go 1.18 github.com/multiformats/go-multiaddr github.com/multiformats/go-multiaddr/net @@ -439,11 +411,11 @@ github.com/multiformats/go-multiaddr-dns # github.com/multiformats/go-multiaddr-fmt v0.1.0 ## explicit; go 1.13 github.com/multiformats/go-multiaddr-fmt -# github.com/multiformats/go-multibase v0.1.1 -## explicit; go 1.17 +# github.com/multiformats/go-multibase v0.2.0 +## explicit; go 1.19 github.com/multiformats/go-multibase -# github.com/multiformats/go-multicodec 
v0.7.0 -## explicit; go 1.18 +# github.com/multiformats/go-multicodec v0.8.1 +## explicit; go 1.19 github.com/multiformats/go-multicodec # github.com/multiformats/go-multihash v0.2.1 ## explicit; go 1.17 @@ -455,8 +427,8 @@ github.com/multiformats/go-multihash/register/blake3 github.com/multiformats/go-multihash/register/miniosha256 github.com/multiformats/go-multihash/register/murmur3 github.com/multiformats/go-multihash/register/sha3 -# github.com/multiformats/go-multistream v0.3.3 -## explicit; go 1.17 +# github.com/multiformats/go-multistream v0.4.1 +## explicit; go 1.19 github.com/multiformats/go-multistream # github.com/multiformats/go-varint v0.0.7 ## explicit; go 1.18 @@ -466,7 +438,7 @@ github.com/multiformats/go-varint github.com/nicksnyder/go-i18n/v2/i18n github.com/nicksnyder/go-i18n/v2/internal github.com/nicksnyder/go-i18n/v2/internal/plural -# github.com/onsi/ginkgo/v2 v2.7.0 +# github.com/onsi/ginkgo/v2 v2.9.2 ## explicit; go 1.18 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -517,8 +489,8 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.3.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.39.0 -## explicit; go 1.17 +# github.com/prometheus/common v0.42.0 +## explicit; go 1.18 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model @@ -527,19 +499,46 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/quic-go/qpack v0.4.0 +## explicit; go 1.18 +github.com/quic-go/qpack +# github.com/quic-go/qtls-go1-19 v0.3.3 +## explicit; go 1.19 +github.com/quic-go/qtls-go1-19 +# github.com/quic-go/qtls-go1-20 v0.2.3 +## explicit; go 1.20 +github.com/quic-go/qtls-go1-20 +# github.com/quic-go/quic-go v0.33.0 +## explicit; go 1.19 +github.com/quic-go/quic-go 
+github.com/quic-go/quic-go/http3 +github.com/quic-go/quic-go/internal/ackhandler +github.com/quic-go/quic-go/internal/congestion +github.com/quic-go/quic-go/internal/flowcontrol +github.com/quic-go/quic-go/internal/handshake +github.com/quic-go/quic-go/internal/logutils +github.com/quic-go/quic-go/internal/protocol +github.com/quic-go/quic-go/internal/qerr +github.com/quic-go/quic-go/internal/qtls +github.com/quic-go/quic-go/internal/utils +github.com/quic-go/quic-go/internal/utils/linkedlist +github.com/quic-go/quic-go/internal/wire +github.com/quic-go/quic-go/logging +github.com/quic-go/quic-go/qlog +github.com/quic-go/quic-go/quicvarint +# github.com/quic-go/webtransport-go v0.5.2 +## explicit; go 1.18 +github.com/quic-go/webtransport-go # github.com/raulk/go-watchdog v1.3.0 ## explicit; go 1.15 github.com/raulk/go-watchdog # github.com/satori/go.uuid v1.2.0 ## explicit github.com/satori/go.uuid -# github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 -## explicit -github.com/spacemonkeygo/spacelog # github.com/spaolacci/murmur3 v1.1.0 ## explicit github.com/spaolacci/murmur3 -# github.com/stretchr/testify v1.8.1 +# github.com/stretchr/testify v1.8.2 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require @@ -594,7 +593,7 @@ go.uber.org/dig/internal/digerror go.uber.org/dig/internal/digreflect go.uber.org/dig/internal/dot go.uber.org/dig/internal/graph -# go.uber.org/fx v1.19.1 +# go.uber.org/fx v1.19.2 ## explicit; go 1.18 go.uber.org/fx go.uber.org/fx/fxevent @@ -602,7 +601,7 @@ go.uber.org/fx/internal/fxclock go.uber.org/fx/internal/fxlog go.uber.org/fx/internal/fxreflect go.uber.org/fx/internal/lifecycle -# go.uber.org/multierr v1.9.0 +# go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr # go.uber.org/zap v1.24.0 @@ -633,10 +632,11 @@ golang.org/x/crypto/pbkdf2 golang.org/x/crypto/ripemd160 golang.org/x/crypto/salsa20/salsa golang.org/x/crypto/sha3 -# golang.org/x/exp 
v0.0.0-20230113213754-f9f960f08ad4 +# golang.org/x/exp v0.0.0-20230321023759-10a507213a29 ## explicit; go 1.18 golang.org/x/exp/constraints -# golang.org/x/mod v0.8.0 +golang.org/x/exp/slices +# golang.org/x/mod v0.10.0 ## explicit; go 1.17 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -646,7 +646,6 @@ golang.org/x/mod/semver ## explicit; go 1.17 golang.org/x/net/bpf golang.org/x/net/context -golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack @@ -658,7 +657,7 @@ golang.org/x/net/ipv4 golang.org/x/net/ipv6 golang.org/x/net/route golang.org/x/net/trace -# golang.org/x/oauth2 v0.3.0 +# golang.org/x/oauth2 v0.5.0 ## explicit; go 1.17 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -686,13 +685,15 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.6.0 +# golang.org/x/tools v0.7.0 ## explicit; go 1.18 +golang.org/x/tools/cmd/goimports golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -771,8 +772,11 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.28.1 +# google.golang.org/protobuf v1.30.0 ## explicit; go 1.11 +google.golang.org/protobuf/cmd/protoc-gen-go +google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo +google.golang.org/protobuf/compiler/protogen google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -790,6 +794,7 @@ google.golang.org/protobuf/internal/filetype google.golang.org/protobuf/internal/flags 
google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl +google.golang.org/protobuf/internal/msgfmt google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma google.golang.org/protobuf/internal/set @@ -797,14 +802,18 @@ google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto google.golang.org/protobuf/reflect/protodesc +google.golang.org/protobuf/reflect/protopath +google.golang.org/protobuf/reflect/protorange google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb +google.golang.org/protobuf/types/dynamicpb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb +google.golang.org/protobuf/types/pluginpb # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 @@ -814,3 +823,10 @@ gopkg.in/yaml.v3 # lukechampine.com/blake3 v1.1.7 ## explicit; go 1.13 lukechampine.com/blake3 +# nhooyr.io/websocket v1.8.7 +## explicit; go 1.13 +nhooyr.io/websocket +nhooyr.io/websocket/internal/bpool +nhooyr.io/websocket/internal/errd +nhooyr.io/websocket/internal/wsjs +nhooyr.io/websocket/internal/xsync diff --git a/vendor/nhooyr.io/websocket/.gitignore b/vendor/nhooyr.io/websocket/.gitignore new file mode 100644 index 00000000..6961e5c8 --- /dev/null +++ b/vendor/nhooyr.io/websocket/.gitignore @@ -0,0 +1 @@ +websocket.test diff --git a/vendor/github.com/mattn/go-pointer/LICENSE b/vendor/nhooyr.io/websocket/LICENSE.txt similarity index 94% rename from vendor/github.com/mattn/go-pointer/LICENSE rename to vendor/nhooyr.io/websocket/LICENSE.txt index 5794eddc..b5b5fef3 100644 --- a/vendor/github.com/mattn/go-pointer/LICENSE +++ b/vendor/nhooyr.io/websocket/LICENSE.txt @@ -1,6 +1,6 @@ -The MIT License 
(MIT) +MIT License -Copyright (c) 2019 Yasuhiro Matsumoto +Copyright (c) 2018 Anmol Sethi Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/nhooyr.io/websocket/README.md b/vendor/nhooyr.io/websocket/README.md new file mode 100644 index 00000000..df20c581 --- /dev/null +++ b/vendor/nhooyr.io/websocket/README.md @@ -0,0 +1,132 @@ +# websocket + +[![godoc](https://godoc.org/nhooyr.io/websocket?status.svg)](https://pkg.go.dev/nhooyr.io/websocket) +[![coverage](https://img.shields.io/badge/coverage-88%25-success)](https://nhooyrio-websocket-coverage.netlify.app) + +websocket is a minimal and idiomatic WebSocket library for Go. + +## Install + +```bash +go get nhooyr.io/websocket +``` + +## Highlights + +- Minimal and idiomatic API +- First class [context.Context](https://blog.golang.org/context) support +- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite) +- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports) +- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages +- Zero alloc reads and writes +- Concurrent writes +- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close) +- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper +- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API +- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression +- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm) + +## Roadmap + +- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4) + +## Examples + +For a production quality example that demonstrates the complete API, see the +[echo example](./examples/echo). + +For a full stack example, see the [chat example](./examples/chat). 
+ +### Server + +```go +http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { + c, err := websocket.Accept(w, r, nil) + if err != nil { + // ... + } + defer c.Close(websocket.StatusInternalError, "the sky is falling") + + ctx, cancel := context.WithTimeout(r.Context(), time.Second*10) + defer cancel() + + var v interface{} + err = wsjson.Read(ctx, c, &v) + if err != nil { + // ... + } + + log.Printf("received: %v", v) + + c.Close(websocket.StatusNormalClosure, "") +}) +``` + +### Client + +```go +ctx, cancel := context.WithTimeout(context.Background(), time.Minute) +defer cancel() + +c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil) +if err != nil { + // ... +} +defer c.Close(websocket.StatusInternalError, "the sky is falling") + +err = wsjson.Write(ctx, c, "hi") +if err != nil { + // ... +} + +c.Close(websocket.StatusNormalClosure, "") +``` + +## Comparison + +### gorilla/websocket + +Advantages of [gorilla/websocket](https://github.com/gorilla/websocket): + +- Mature and widely used +- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage) +- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers) + +Advantages of nhooyr.io/websocket: + +- Minimal and idiomatic API + - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side. +- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper +- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535)) +- Full [context.Context](https://blog.golang.org/context) support +- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client) + - Will enable easy HTTP/2 support in the future + - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client. 
+- Concurrent writes +- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448)) +- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API + - Gorilla requires registering a pong callback before sending a Ping +- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432)) +- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages +- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go + - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/). +- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support + - Gorilla only supports no context takeover mode + - We use [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203)) +- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492)) +- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370)) + +#### golang.org/x/net/websocket + +[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated. +See [golang/go/issues/18152](https://github.com/golang/go/issues/18152). + +The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) can help in transitioning +to nhooyr.io/websocket. + +#### gobwas/ws + +[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used +in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb). + +However when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use. 
diff --git a/vendor/nhooyr.io/websocket/accept.go b/vendor/nhooyr.io/websocket/accept.go new file mode 100644 index 00000000..18536bdb --- /dev/null +++ b/vendor/nhooyr.io/websocket/accept.go @@ -0,0 +1,370 @@ +// +build !js + +package websocket + +import ( + "bytes" + "crypto/sha1" + "encoding/base64" + "errors" + "fmt" + "io" + "log" + "net/http" + "net/textproto" + "net/url" + "path/filepath" + "strings" + + "nhooyr.io/websocket/internal/errd" +) + +// AcceptOptions represents Accept's options. +type AcceptOptions struct { + // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client. + // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to + // reject it, close the connection when c.Subprotocol() == "". + Subprotocols []string + + // InsecureSkipVerify is used to disable Accept's origin verification behaviour. + // + // You probably want to use OriginPatterns instead. + InsecureSkipVerify bool + + // OriginPatterns lists the host patterns for authorized origins. + // The request host is always authorized. + // Use this to enable cross origin WebSockets. + // + // i.e javascript running on example.com wants to access a WebSocket server at chat.example.com. + // In such a case, example.com is the origin and chat.example.com is the request host. + // One would set this field to []string{"example.com"} to authorize example.com to connect. + // + // Each pattern is matched case insensitively against the request origin host + // with filepath.Match. + // See https://golang.org/pkg/path/filepath/#Match + // + // Please ensure you understand the ramifications of enabling this. + // If used incorrectly your WebSocket server will be open to CSRF attacks. + // + // Do not use * as a pattern to allow any origin, prefer to use InsecureSkipVerify instead + // to bring attention to the danger of such a setting. + OriginPatterns []string + + // CompressionMode controls the compression mode. 
+ // Defaults to CompressionNoContextTakeover. + // + // See docs on CompressionMode for details. + CompressionMode CompressionMode + + // CompressionThreshold controls the minimum size of a message before compression is applied. + // + // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes + // for CompressionContextTakeover. + CompressionThreshold int +} + +// Accept accepts a WebSocket handshake from a client and upgrades the +// the connection to a WebSocket. +// +// Accept will not allow cross origin requests by default. +// See the InsecureSkipVerify and OriginPatterns options to allow cross origin requests. +// +// Accept will write a response to w on all errors. +func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) { + return accept(w, r, opts) +} + +func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) { + defer errd.Wrap(&err, "failed to accept WebSocket connection") + + if opts == nil { + opts = &AcceptOptions{} + } + opts = &*opts + + errCode, err := verifyClientRequest(w, r) + if err != nil { + http.Error(w, err.Error(), errCode) + return nil, err + } + + if !opts.InsecureSkipVerify { + err = authenticateOrigin(r, opts.OriginPatterns) + if err != nil { + if errors.Is(err, filepath.ErrBadPattern) { + log.Printf("websocket: %v", err) + err = errors.New(http.StatusText(http.StatusForbidden)) + } + http.Error(w, err.Error(), http.StatusForbidden) + return nil, err + } + } + + hj, ok := w.(http.Hijacker) + if !ok { + err = errors.New("http.ResponseWriter does not implement http.Hijacker") + http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented) + return nil, err + } + + w.Header().Set("Upgrade", "websocket") + w.Header().Set("Connection", "Upgrade") + + key := r.Header.Get("Sec-WebSocket-Key") + w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key)) + + subproto := selectSubprotocol(r, opts.Subprotocols) + if subproto != "" { + 
w.Header().Set("Sec-WebSocket-Protocol", subproto) + } + + copts, err := acceptCompression(r, w, opts.CompressionMode) + if err != nil { + return nil, err + } + + w.WriteHeader(http.StatusSwitchingProtocols) + // See https://github.com/nhooyr/websocket/issues/166 + if ginWriter, ok := w.(interface { + WriteHeaderNow() + }); ok { + ginWriter.WriteHeaderNow() + } + + netConn, brw, err := hj.Hijack() + if err != nil { + err = fmt.Errorf("failed to hijack connection: %w", err) + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + return nil, err + } + + // https://github.com/golang/go/issues/32314 + b, _ := brw.Reader.Peek(brw.Reader.Buffered()) + brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn)) + + return newConn(connConfig{ + subprotocol: w.Header().Get("Sec-WebSocket-Protocol"), + rwc: netConn, + client: false, + copts: copts, + flateThreshold: opts.CompressionThreshold, + + br: brw.Reader, + bw: brw.Writer, + }), nil +} + +func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) { + if !r.ProtoAtLeast(1, 1) { + return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto) + } + + if !headerContainsTokenIgnoreCase(r.Header, "Connection", "Upgrade") { + w.Header().Set("Connection", "Upgrade") + w.Header().Set("Upgrade", "websocket") + return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection")) + } + + if !headerContainsTokenIgnoreCase(r.Header, "Upgrade", "websocket") { + w.Header().Set("Connection", "Upgrade") + w.Header().Set("Upgrade", "websocket") + return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade")) + } + + if r.Method != "GET" { + return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: 
handshake request method is not GET but %q", r.Method) + } + + if r.Header.Get("Sec-WebSocket-Version") != "13" { + w.Header().Set("Sec-WebSocket-Version", "13") + return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version")) + } + + if r.Header.Get("Sec-WebSocket-Key") == "" { + return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key") + } + + return 0, nil +} + +func authenticateOrigin(r *http.Request, originHosts []string) error { + origin := r.Header.Get("Origin") + if origin == "" { + return nil + } + + u, err := url.Parse(origin) + if err != nil { + return fmt.Errorf("failed to parse Origin header %q: %w", origin, err) + } + + if strings.EqualFold(r.Host, u.Host) { + return nil + } + + for _, hostPattern := range originHosts { + matched, err := match(hostPattern, u.Host) + if err != nil { + return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err) + } + if matched { + return nil + } + } + return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host) +} + +func match(pattern, s string) (bool, error) { + return filepath.Match(strings.ToLower(pattern), strings.ToLower(s)) +} + +func selectSubprotocol(r *http.Request, subprotocols []string) string { + cps := headerTokens(r.Header, "Sec-WebSocket-Protocol") + for _, sp := range subprotocols { + for _, cp := range cps { + if strings.EqualFold(sp, cp) { + return cp + } + } + } + return "" +} + +func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) { + if mode == CompressionDisabled { + return nil, nil + } + + for _, ext := range websocketExtensions(r.Header) { + switch ext.name { + case "permessage-deflate": + return acceptDeflate(w, ext, mode) + // Disabled for now, see https://github.com/nhooyr/websocket/issues/218 + // case "x-webkit-deflate-frame": + // return acceptWebkitDeflate(w, 
ext, mode) + } + } + return nil, nil +} + +func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) { + copts := mode.opts() + + for _, p := range ext.params { + switch p { + case "client_no_context_takeover": + copts.clientNoContextTakeover = true + continue + case "server_no_context_takeover": + copts.serverNoContextTakeover = true + continue + } + + if strings.HasPrefix(p, "client_max_window_bits") { + // We cannot adjust the read sliding window so cannot make use of this. + continue + } + + err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p) + http.Error(w, err.Error(), http.StatusBadRequest) + return nil, err + } + + copts.setHeader(w.Header()) + + return copts, nil +} + +func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) { + copts := mode.opts() + // The peer must explicitly request it. + copts.serverNoContextTakeover = false + + for _, p := range ext.params { + if p == "no_context_takeover" { + copts.serverNoContextTakeover = true + continue + } + + // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead + // of ignoring it as the draft spec is unclear. It says the server can ignore it + // but the server has no way of signalling to the client it was ignored as the parameters + // are set one way. + // Thus us ignoring it would make the client think we understood it which would cause issues. + // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1 + // + // Either way, we're only implementing this for webkit which never sends the max_window_bits + // parameter so we don't need to worry about it. 
+ err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p) + http.Error(w, err.Error(), http.StatusBadRequest) + return nil, err + } + + s := "x-webkit-deflate-frame" + if copts.clientNoContextTakeover { + s += "; no_context_takeover" + } + w.Header().Set("Sec-WebSocket-Extensions", s) + + return copts, nil +} + +func headerContainsTokenIgnoreCase(h http.Header, key, token string) bool { + for _, t := range headerTokens(h, key) { + if strings.EqualFold(t, token) { + return true + } + } + return false +} + +type websocketExtension struct { + name string + params []string +} + +func websocketExtensions(h http.Header) []websocketExtension { + var exts []websocketExtension + extStrs := headerTokens(h, "Sec-WebSocket-Extensions") + for _, extStr := range extStrs { + if extStr == "" { + continue + } + + vals := strings.Split(extStr, ";") + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + + e := websocketExtension{ + name: vals[0], + params: vals[1:], + } + + exts = append(exts, e) + } + return exts +} + +func headerTokens(h http.Header, key string) []string { + key = textproto.CanonicalMIMEHeaderKey(key) + var tokens []string + for _, v := range h[key] { + v = strings.TrimSpace(v) + for _, t := range strings.Split(v, ",") { + t = strings.TrimSpace(t) + tokens = append(tokens, t) + } + } + return tokens +} + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func secWebSocketAccept(secWebSocketKey string) string { + h := sha1.New() + h.Write([]byte(secWebSocketKey)) + h.Write(keyGUID) + + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} diff --git a/vendor/nhooyr.io/websocket/accept_js.go b/vendor/nhooyr.io/websocket/accept_js.go new file mode 100644 index 00000000..daad4b79 --- /dev/null +++ b/vendor/nhooyr.io/websocket/accept_js.go @@ -0,0 +1,20 @@ +package websocket + +import ( + "errors" + "net/http" +) + +// AcceptOptions represents Accept's options. 
+type AcceptOptions struct { + Subprotocols []string + InsecureSkipVerify bool + OriginPatterns []string + CompressionMode CompressionMode + CompressionThreshold int +} + +// Accept is stubbed out for Wasm. +func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) { + return nil, errors.New("unimplemented") +} diff --git a/vendor/nhooyr.io/websocket/close.go b/vendor/nhooyr.io/websocket/close.go new file mode 100644 index 00000000..7cbc19e9 --- /dev/null +++ b/vendor/nhooyr.io/websocket/close.go @@ -0,0 +1,76 @@ +package websocket + +import ( + "errors" + "fmt" +) + +// StatusCode represents a WebSocket status code. +// https://tools.ietf.org/html/rfc6455#section-7.4 +type StatusCode int + +// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number +// +// These are only the status codes defined by the protocol. +// +// You can define custom codes in the 3000-4999 range. +// The 3000-3999 range is reserved for use by libraries, frameworks and applications. +// The 4000-4999 range is reserved for private use. +const ( + StatusNormalClosure StatusCode = 1000 + StatusGoingAway StatusCode = 1001 + StatusProtocolError StatusCode = 1002 + StatusUnsupportedData StatusCode = 1003 + + // 1004 is reserved and so unexported. + statusReserved StatusCode = 1004 + + // StatusNoStatusRcvd cannot be sent in a close message. + // It is reserved for when a close message is received without + // a status code. + StatusNoStatusRcvd StatusCode = 1005 + + // StatusAbnormalClosure is exported for use only with Wasm. + // In non Wasm Go, the returned error will indicate whether the + // connection was closed abnormally. 
+ StatusAbnormalClosure StatusCode = 1006 + + StatusInvalidFramePayloadData StatusCode = 1007 + StatusPolicyViolation StatusCode = 1008 + StatusMessageTooBig StatusCode = 1009 + StatusMandatoryExtension StatusCode = 1010 + StatusInternalError StatusCode = 1011 + StatusServiceRestart StatusCode = 1012 + StatusTryAgainLater StatusCode = 1013 + StatusBadGateway StatusCode = 1014 + + // StatusTLSHandshake is only exported for use with Wasm. + // In non Wasm Go, the returned error will indicate whether there was + // a TLS handshake failure. + StatusTLSHandshake StatusCode = 1015 +) + +// CloseError is returned when the connection is closed with a status and reason. +// +// Use Go 1.13's errors.As to check for this error. +// Also see the CloseStatus helper. +type CloseError struct { + Code StatusCode + Reason string +} + +func (ce CloseError) Error() string { + return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason) +} + +// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab +// the status code from a CloseError. +// +// -1 will be returned if the passed error is nil or not a CloseError. +func CloseStatus(err error) StatusCode { + var ce CloseError + if errors.As(err, &ce) { + return ce.Code + } + return -1 +} diff --git a/vendor/nhooyr.io/websocket/close_notjs.go b/vendor/nhooyr.io/websocket/close_notjs.go new file mode 100644 index 00000000..4251311d --- /dev/null +++ b/vendor/nhooyr.io/websocket/close_notjs.go @@ -0,0 +1,211 @@ +// +build !js + +package websocket + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "log" + "time" + + "nhooyr.io/websocket/internal/errd" +) + +// Close performs the WebSocket close handshake with the given status code and reason. +// +// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for +// the peer to send a close frame. +// All data messages received from the peer during the close handshake will be discarded. +// +// The connection can only be closed once. 
Additional calls to Close +// are no-ops. +// +// The maximum length of reason must be 125 bytes. Avoid +// sending a dynamic reason. +// +// Close will unblock all goroutines interacting with the connection once +// complete. +func (c *Conn) Close(code StatusCode, reason string) error { + return c.closeHandshake(code, reason) +} + +func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) { + defer errd.Wrap(&err, "failed to close WebSocket") + + writeErr := c.writeClose(code, reason) + closeHandshakeErr := c.waitCloseHandshake() + + if writeErr != nil { + return writeErr + } + + if CloseStatus(closeHandshakeErr) == -1 { + return closeHandshakeErr + } + + return nil +} + +var errAlreadyWroteClose = errors.New("already wrote close") + +func (c *Conn) writeClose(code StatusCode, reason string) error { + c.closeMu.Lock() + wroteClose := c.wroteClose + c.wroteClose = true + c.closeMu.Unlock() + if wroteClose { + return errAlreadyWroteClose + } + + ce := CloseError{ + Code: code, + Reason: reason, + } + + var p []byte + var marshalErr error + if ce.Code != StatusNoStatusRcvd { + p, marshalErr = ce.bytes() + if marshalErr != nil { + log.Printf("websocket: %v", marshalErr) + } + } + + writeErr := c.writeControl(context.Background(), opClose, p) + if CloseStatus(writeErr) != -1 { + // Not a real error if it's due to a close frame being received. + writeErr = nil + } + + // We do this after in case there was an error writing the close frame. 
+ c.setCloseErr(fmt.Errorf("sent close frame: %w", ce)) + + if marshalErr != nil { + return marshalErr + } + return writeErr +} + +func (c *Conn) waitCloseHandshake() error { + defer c.close(nil) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + err := c.readMu.lock(ctx) + if err != nil { + return err + } + defer c.readMu.unlock() + + if c.readCloseFrameErr != nil { + return c.readCloseFrameErr + } + + for { + h, err := c.readLoop(ctx) + if err != nil { + return err + } + + for i := int64(0); i < h.payloadLength; i++ { + _, err := c.br.ReadByte() + if err != nil { + return err + } + } + } +} + +func parseClosePayload(p []byte) (CloseError, error) { + if len(p) == 0 { + return CloseError{ + Code: StatusNoStatusRcvd, + }, nil + } + + if len(p) < 2 { + return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p) + } + + ce := CloseError{ + Code: StatusCode(binary.BigEndian.Uint16(p)), + Reason: string(p[2:]), + } + + if !validWireCloseCode(ce.Code) { + return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code) + } + + return ce, nil +} + +// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number +// and https://tools.ietf.org/html/rfc6455#section-7.4.1 +func validWireCloseCode(code StatusCode) bool { + switch code { + case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake: + return false + } + + if code >= StatusNormalClosure && code <= StatusBadGateway { + return true + } + if code >= 3000 && code <= 4999 { + return true + } + + return false +} + +func (ce CloseError) bytes() ([]byte, error) { + p, err := ce.bytesErr() + if err != nil { + err = fmt.Errorf("failed to marshal close frame: %w", err) + ce = CloseError{ + Code: StatusInternalError, + } + p, _ = ce.bytesErr() + } + return p, err +} + +const maxCloseReason = maxControlPayload - 2 + +func (ce CloseError) bytesErr() ([]byte, error) { + if len(ce.Reason) > 
maxCloseReason { + return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason)) + } + + if !validWireCloseCode(ce.Code) { + return nil, fmt.Errorf("status code %v cannot be set", ce.Code) + } + + buf := make([]byte, 2+len(ce.Reason)) + binary.BigEndian.PutUint16(buf, uint16(ce.Code)) + copy(buf[2:], ce.Reason) + return buf, nil +} + +func (c *Conn) setCloseErr(err error) { + c.closeMu.Lock() + c.setCloseErrLocked(err) + c.closeMu.Unlock() +} + +func (c *Conn) setCloseErrLocked(err error) { + if c.closeErr == nil { + c.closeErr = fmt.Errorf("WebSocket closed: %w", err) + } +} + +func (c *Conn) isClosed() bool { + select { + case <-c.closed: + return true + default: + return false + } +} diff --git a/vendor/nhooyr.io/websocket/compress.go b/vendor/nhooyr.io/websocket/compress.go new file mode 100644 index 00000000..80b46d1c --- /dev/null +++ b/vendor/nhooyr.io/websocket/compress.go @@ -0,0 +1,39 @@ +package websocket + +// CompressionMode represents the modes available to the deflate extension. +// See https://tools.ietf.org/html/rfc7692 +// +// A compatibility layer is implemented for the older deflate-frame extension used +// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06 +// It will work the same in every way except that we cannot signal to the peer we +// want to use no context takeover on our side, we can only signal that they should. +// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218 +type CompressionMode int + +const ( + // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed + // for every message. This applies to both server and client side. + // + // This means less efficient compression as the sliding window from previous messages + // will not be used but the memory overhead will be lower if the connections + // are long lived and seldom used. 
+ // + // The message will only be compressed if greater than 512 bytes. + CompressionNoContextTakeover CompressionMode = iota + + // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection. + // This enables reusing the sliding window from previous messages. + // As most WebSocket protocols are repetitive, this can be very efficient. + // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover. + // + // If the peer negotiates NoContextTakeover on the client or server side, it will be + // used instead as this is required by the RFC. + CompressionContextTakeover + + // CompressionDisabled disables the deflate extension. + // + // Use this if you are using a predominantly binary protocol with very + // little duplication in between messages or CPU and memory are more + // important than bandwidth. + CompressionDisabled +) diff --git a/vendor/nhooyr.io/websocket/compress_notjs.go b/vendor/nhooyr.io/websocket/compress_notjs.go new file mode 100644 index 00000000..809a272c --- /dev/null +++ b/vendor/nhooyr.io/websocket/compress_notjs.go @@ -0,0 +1,181 @@ +// +build !js + +package websocket + +import ( + "io" + "net/http" + "sync" + + "github.com/klauspost/compress/flate" +) + +func (m CompressionMode) opts() *compressionOptions { + return &compressionOptions{ + clientNoContextTakeover: m == CompressionNoContextTakeover, + serverNoContextTakeover: m == CompressionNoContextTakeover, + } +} + +type compressionOptions struct { + clientNoContextTakeover bool + serverNoContextTakeover bool +} + +func (copts *compressionOptions) setHeader(h http.Header) { + s := "permessage-deflate" + if copts.clientNoContextTakeover { + s += "; client_no_context_takeover" + } + if copts.serverNoContextTakeover { + s += "; server_no_context_takeover" + } + h.Set("Sec-WebSocket-Extensions", s) +} + +// These bytes are required to get flate.Reader to return. 
+// They are removed when sending to avoid the overhead as +// WebSocket framing tell's when the message has ended but then +// we need to add them back otherwise flate.Reader keeps +// trying to return more bytes. +const deflateMessageTail = "\x00\x00\xff\xff" + +type trimLastFourBytesWriter struct { + w io.Writer + tail []byte +} + +func (tw *trimLastFourBytesWriter) reset() { + if tw != nil && tw.tail != nil { + tw.tail = tw.tail[:0] + } +} + +func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) { + if tw.tail == nil { + tw.tail = make([]byte, 0, 4) + } + + extra := len(tw.tail) + len(p) - 4 + + if extra <= 0 { + tw.tail = append(tw.tail, p...) + return len(p), nil + } + + // Now we need to write as many extra bytes as we can from the previous tail. + if extra > len(tw.tail) { + extra = len(tw.tail) + } + if extra > 0 { + _, err := tw.w.Write(tw.tail[:extra]) + if err != nil { + return 0, err + } + + // Shift remaining bytes in tail over. + n := copy(tw.tail, tw.tail[extra:]) + tw.tail = tw.tail[:n] + } + + // If p is less than or equal to 4 bytes, + // all of it is is part of the tail. + if len(p) <= 4 { + tw.tail = append(tw.tail, p...) + return len(p), nil + } + + // Otherwise, only the last 4 bytes are. + tw.tail = append(tw.tail, p[len(p)-4:]...) 
+ + p = p[:len(p)-4] + n, err := tw.w.Write(p) + return n + 4, err +} + +var flateReaderPool sync.Pool + +func getFlateReader(r io.Reader, dict []byte) io.Reader { + fr, ok := flateReaderPool.Get().(io.Reader) + if !ok { + return flate.NewReaderDict(r, dict) + } + fr.(flate.Resetter).Reset(r, dict) + return fr +} + +func putFlateReader(fr io.Reader) { + flateReaderPool.Put(fr) +} + +type slidingWindow struct { + buf []byte +} + +var swPoolMu sync.RWMutex +var swPool = map[int]*sync.Pool{} + +func slidingWindowPool(n int) *sync.Pool { + swPoolMu.RLock() + p, ok := swPool[n] + swPoolMu.RUnlock() + if ok { + return p + } + + p = &sync.Pool{} + + swPoolMu.Lock() + swPool[n] = p + swPoolMu.Unlock() + + return p +} + +func (sw *slidingWindow) init(n int) { + if sw.buf != nil { + return + } + + if n == 0 { + n = 32768 + } + + p := slidingWindowPool(n) + buf, ok := p.Get().([]byte) + if ok { + sw.buf = buf[:0] + } else { + sw.buf = make([]byte, 0, n) + } +} + +func (sw *slidingWindow) close() { + if sw.buf == nil { + return + } + + swPoolMu.Lock() + swPool[cap(sw.buf)].Put(sw.buf) + swPoolMu.Unlock() + sw.buf = nil +} + +func (sw *slidingWindow) write(p []byte) { + if len(p) >= cap(sw.buf) { + sw.buf = sw.buf[:cap(sw.buf)] + p = p[len(p)-cap(sw.buf):] + copy(sw.buf, p) + return + } + + left := cap(sw.buf) - len(sw.buf) + if left < len(p) { + // We need to shift spaceNeeded bytes from the end to make room for p at the end. + spaceNeeded := len(p) - left + copy(sw.buf, sw.buf[spaceNeeded:]) + sw.buf = sw.buf[:len(sw.buf)-spaceNeeded] + } + + sw.buf = append(sw.buf, p...) +} diff --git a/vendor/nhooyr.io/websocket/conn.go b/vendor/nhooyr.io/websocket/conn.go new file mode 100644 index 00000000..a41808be --- /dev/null +++ b/vendor/nhooyr.io/websocket/conn.go @@ -0,0 +1,13 @@ +package websocket + +// MessageType represents the type of a WebSocket message. +// See https://tools.ietf.org/html/rfc6455#section-5.6 +type MessageType int + +// MessageType constants. 
+const ( + // MessageText is for UTF-8 encoded text messages like JSON. + MessageText MessageType = iota + 1 + // MessageBinary is for binary messages like protobufs. + MessageBinary +) diff --git a/vendor/nhooyr.io/websocket/conn_notjs.go b/vendor/nhooyr.io/websocket/conn_notjs.go new file mode 100644 index 00000000..0c85ab77 --- /dev/null +++ b/vendor/nhooyr.io/websocket/conn_notjs.go @@ -0,0 +1,265 @@ +// +build !js + +package websocket + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "sync" + "sync/atomic" +) + +// Conn represents a WebSocket connection. +// All methods may be called concurrently except for Reader and Read. +// +// You must always read from the connection. Otherwise control +// frames will not be handled. See Reader and CloseRead. +// +// Be sure to call Close on the connection when you +// are finished with it to release associated resources. +// +// On any error from any method, the connection is closed +// with an appropriate reason. +type Conn struct { + subprotocol string + rwc io.ReadWriteCloser + client bool + copts *compressionOptions + flateThreshold int + br *bufio.Reader + bw *bufio.Writer + + readTimeout chan context.Context + writeTimeout chan context.Context + + // Read state. + readMu *mu + readHeaderBuf [8]byte + readControlBuf [maxControlPayload]byte + msgReader *msgReader + readCloseFrameErr error + + // Write state. 
+ msgWriterState *msgWriterState + writeFrameMu *mu + writeBuf []byte + writeHeaderBuf [8]byte + writeHeader header + + closed chan struct{} + closeMu sync.Mutex + closeErr error + wroteClose bool + + pingCounter int32 + activePingsMu sync.Mutex + activePings map[string]chan<- struct{} +} + +type connConfig struct { + subprotocol string + rwc io.ReadWriteCloser + client bool + copts *compressionOptions + flateThreshold int + + br *bufio.Reader + bw *bufio.Writer +} + +func newConn(cfg connConfig) *Conn { + c := &Conn{ + subprotocol: cfg.subprotocol, + rwc: cfg.rwc, + client: cfg.client, + copts: cfg.copts, + flateThreshold: cfg.flateThreshold, + + br: cfg.br, + bw: cfg.bw, + + readTimeout: make(chan context.Context), + writeTimeout: make(chan context.Context), + + closed: make(chan struct{}), + activePings: make(map[string]chan<- struct{}), + } + + c.readMu = newMu(c) + c.writeFrameMu = newMu(c) + + c.msgReader = newMsgReader(c) + + c.msgWriterState = newMsgWriterState(c) + if c.client { + c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc) + } + + if c.flate() && c.flateThreshold == 0 { + c.flateThreshold = 128 + if !c.msgWriterState.flateContextTakeover() { + c.flateThreshold = 512 + } + } + + runtime.SetFinalizer(c, func(c *Conn) { + c.close(errors.New("connection garbage collected")) + }) + + go c.timeoutLoop() + + return c +} + +// Subprotocol returns the negotiated subprotocol. +// An empty string means the default protocol. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +func (c *Conn) close(err error) { + c.closeMu.Lock() + defer c.closeMu.Unlock() + + if c.isClosed() { + return + } + c.setCloseErrLocked(err) + close(c.closed) + runtime.SetFinalizer(c, nil) + + // Have to close after c.closed is closed to ensure any goroutine that wakes up + // from the connection being closed also sees that c.closed is closed and returns + // closeErr. 
+ c.rwc.Close() + + go func() { + c.msgWriterState.close() + + c.msgReader.close() + }() +} + +func (c *Conn) timeoutLoop() { + readCtx := context.Background() + writeCtx := context.Background() + + for { + select { + case <-c.closed: + return + + case writeCtx = <-c.writeTimeout: + case readCtx = <-c.readTimeout: + + case <-readCtx.Done(): + c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err())) + go c.writeError(StatusPolicyViolation, errors.New("timed out")) + case <-writeCtx.Done(): + c.close(fmt.Errorf("write timed out: %w", writeCtx.Err())) + return + } + } +} + +func (c *Conn) flate() bool { + return c.copts != nil +} + +// Ping sends a ping to the peer and waits for a pong. +// Use this to measure latency or ensure the peer is responsive. +// Ping must be called concurrently with Reader as it does +// not read from the connection but instead waits for a Reader call +// to read the pong. +// +// TCP Keepalives should suffice for most use cases. +func (c *Conn) Ping(ctx context.Context) error { + p := atomic.AddInt32(&c.pingCounter, 1) + + err := c.ping(ctx, strconv.Itoa(int(p))) + if err != nil { + return fmt.Errorf("failed to ping: %w", err) + } + return nil +} + +func (c *Conn) ping(ctx context.Context, p string) error { + pong := make(chan struct{}, 1) + + c.activePingsMu.Lock() + c.activePings[p] = pong + c.activePingsMu.Unlock() + + defer func() { + c.activePingsMu.Lock() + delete(c.activePings, p) + c.activePingsMu.Unlock() + }() + + err := c.writeControl(ctx, opPing, []byte(p)) + if err != nil { + return err + } + + select { + case <-c.closed: + return c.closeErr + case <-ctx.Done(): + err := fmt.Errorf("failed to wait for pong: %w", ctx.Err()) + c.close(err) + return err + case <-pong: + return nil + } +} + +type mu struct { + c *Conn + ch chan struct{} +} + +func newMu(c *Conn) *mu { + return &mu{ + c: c, + ch: make(chan struct{}, 1), + } +} + +func (m *mu) forceLock() { + m.ch <- struct{}{} +} + +func (m *mu) lock(ctx context.Context) error 
{ + select { + case <-m.c.closed: + return m.c.closeErr + case <-ctx.Done(): + err := fmt.Errorf("failed to acquire lock: %w", ctx.Err()) + m.c.close(err) + return err + case m.ch <- struct{}{}: + // To make sure the connection is certainly alive. + // As it's possible the send on m.ch was selected + // over the receive on closed. + select { + case <-m.c.closed: + // Make sure to release. + m.unlock() + return m.c.closeErr + default: + } + return nil + } +} + +func (m *mu) unlock() { + select { + case <-m.ch: + default: + } +} diff --git a/vendor/nhooyr.io/websocket/dial.go b/vendor/nhooyr.io/websocket/dial.go new file mode 100644 index 00000000..7a7787ff --- /dev/null +++ b/vendor/nhooyr.io/websocket/dial.go @@ -0,0 +1,292 @@ +// +build !js + +package websocket + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "nhooyr.io/websocket/internal/errd" +) + +// DialOptions represents Dial's options. +type DialOptions struct { + // HTTPClient is used for the connection. + // Its Transport must return writable bodies for WebSocket handshakes. + // http.Transport does beginning with Go 1.12. + HTTPClient *http.Client + + // HTTPHeader specifies the HTTP headers included in the handshake request. + HTTPHeader http.Header + + // Subprotocols lists the WebSocket subprotocols to negotiate with the server. + Subprotocols []string + + // CompressionMode controls the compression mode. + // Defaults to CompressionNoContextTakeover. + // + // See docs on CompressionMode for details. + CompressionMode CompressionMode + + // CompressionThreshold controls the minimum size of a message before compression is applied. + // + // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes + // for CompressionContextTakeover. + CompressionThreshold int +} + +// Dial performs a WebSocket handshake on url. 
+// +// The response is the WebSocket handshake response from the server. +// You never need to close resp.Body yourself. +// +// If an error occurs, the returned response may be non nil. +// However, you can only read the first 1024 bytes of the body. +// +// This function requires at least Go 1.12 as it uses a new feature +// in net/http to perform WebSocket handshakes. +// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861 +// +// URLs with http/https schemes will work and are interpreted as ws/wss. +func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) { + return dial(ctx, u, opts, nil) +} + +func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) { + defer errd.Wrap(&err, "failed to WebSocket dial") + + if opts == nil { + opts = &DialOptions{} + } + + opts = &*opts + if opts.HTTPClient == nil { + opts.HTTPClient = http.DefaultClient + } else if opts.HTTPClient.Timeout > 0 { + var cancel context.CancelFunc + + ctx, cancel = context.WithTimeout(ctx, opts.HTTPClient.Timeout) + defer cancel() + + newClient := *opts.HTTPClient + newClient.Timeout = 0 + opts.HTTPClient = &newClient + } + + if opts.HTTPHeader == nil { + opts.HTTPHeader = http.Header{} + } + + secWebSocketKey, err := secWebSocketKey(rand) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err) + } + + var copts *compressionOptions + if opts.CompressionMode != CompressionDisabled { + copts = opts.CompressionMode.opts() + } + + resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey) + if err != nil { + return nil, resp, err + } + respBody := resp.Body + resp.Body = nil + defer func() { + if err != nil { + // We read a bit of the body for easier debugging. 
+ r := io.LimitReader(respBody, 1024) + + timer := time.AfterFunc(time.Second*3, func() { + respBody.Close() + }) + defer timer.Stop() + + b, _ := ioutil.ReadAll(r) + respBody.Close() + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + }() + + copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp) + if err != nil { + return nil, resp, err + } + + rwc, ok := respBody.(io.ReadWriteCloser) + if !ok { + return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody) + } + + return newConn(connConfig{ + subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"), + rwc: rwc, + client: true, + copts: copts, + flateThreshold: opts.CompressionThreshold, + br: getBufioReader(rwc), + bw: getBufioWriter(rwc), + }), resp, nil +} + +func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) { + u, err := url.Parse(urls) + if err != nil { + return nil, fmt.Errorf("failed to parse url: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + case "http", "https": + default: + return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme) + } + + req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req.Header = opts.HTTPHeader.Clone() + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "websocket") + req.Header.Set("Sec-WebSocket-Version", "13") + req.Header.Set("Sec-WebSocket-Key", secWebSocketKey) + if len(opts.Subprotocols) > 0 { + req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ",")) + } + if copts != nil { + copts.setHeader(req.Header) + } + + resp, err := opts.HTTPClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to send handshake request: %w", err) + } + return resp, nil +} + +func secWebSocketKey(rr io.Reader) (string, error) { + if rr == nil { + rr = rand.Reader + } + b := make([]byte, 16) + _, err := io.ReadFull(rr, b) + if 
err != nil { + return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err) + } + return base64.StdEncoding.EncodeToString(b), nil +} + +func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) { + if resp.StatusCode != http.StatusSwitchingProtocols { + return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode) + } + + if !headerContainsTokenIgnoreCase(resp.Header, "Connection", "Upgrade") { + return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection")) + } + + if !headerContainsTokenIgnoreCase(resp.Header, "Upgrade", "WebSocket") { + return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade")) + } + + if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) { + return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q", + resp.Header.Get("Sec-WebSocket-Accept"), + secWebSocketKey, + ) + } + + err := verifySubprotocol(opts.Subprotocols, resp) + if err != nil { + return nil, err + } + + return verifyServerExtensions(copts, resp.Header) +} + +func verifySubprotocol(subprotos []string, resp *http.Response) error { + proto := resp.Header.Get("Sec-WebSocket-Protocol") + if proto == "" { + return nil + } + + for _, sp2 := range subprotos { + if strings.EqualFold(sp2, proto) { + return nil + } + } + + return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto) +} + +func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) { + exts := websocketExtensions(h) + if len(exts) == 0 { + return nil, nil + } + + ext := exts[0] + if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil { + return nil, 
fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:]) + } + + copts = &*copts + + for _, p := range ext.params { + switch p { + case "client_no_context_takeover": + copts.clientNoContextTakeover = true + continue + case "server_no_context_takeover": + copts.serverNoContextTakeover = true + continue + } + + return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p) + } + + return copts, nil +} + +var bufioReaderPool sync.Pool + +func getBufioReader(r io.Reader) *bufio.Reader { + br, ok := bufioReaderPool.Get().(*bufio.Reader) + if !ok { + return bufio.NewReader(r) + } + br.Reset(r) + return br +} + +func putBufioReader(br *bufio.Reader) { + bufioReaderPool.Put(br) +} + +var bufioWriterPool sync.Pool + +func getBufioWriter(w io.Writer) *bufio.Writer { + bw, ok := bufioWriterPool.Get().(*bufio.Writer) + if !ok { + return bufio.NewWriter(w) + } + bw.Reset(w) + return bw +} + +func putBufioWriter(bw *bufio.Writer) { + bufioWriterPool.Put(bw) +} diff --git a/vendor/nhooyr.io/websocket/doc.go b/vendor/nhooyr.io/websocket/doc.go new file mode 100644 index 00000000..efa920e3 --- /dev/null +++ b/vendor/nhooyr.io/websocket/doc.go @@ -0,0 +1,32 @@ +// +build !js + +// Package websocket implements the RFC 6455 WebSocket protocol. +// +// https://tools.ietf.org/html/rfc6455 +// +// Use Dial to dial a WebSocket server. +// +// Use Accept to accept a WebSocket client. +// +// Conn represents the resulting WebSocket connection. +// +// The examples are the best way to understand how to correctly use the library. +// +// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages. +// +// More documentation at https://nhooyr.io/websocket. +// +// Wasm +// +// The client side supports compiling to Wasm. +// It wraps the WebSocket browser API. 
+// +// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket +// +// Some important caveats to be aware of: +// +// - Accept always errors out +// - Conn.Ping is no-op +// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op +// - *http.Response from Dial is &http.Response{} with a 101 status code on success +package websocket // import "nhooyr.io/websocket" diff --git a/vendor/nhooyr.io/websocket/frame.go b/vendor/nhooyr.io/websocket/frame.go new file mode 100644 index 00000000..2a036f94 --- /dev/null +++ b/vendor/nhooyr.io/websocket/frame.go @@ -0,0 +1,294 @@ +package websocket + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" + + "nhooyr.io/websocket/internal/errd" +) + +// opcode represents a WebSocket opcode. +type opcode int + +// https://tools.ietf.org/html/rfc6455#section-11.8. +const ( + opContinuation opcode = iota + opText + opBinary + // 3 - 7 are reserved for further non-control frames. + _ + _ + _ + _ + _ + opClose + opPing + opPong + // 11-16 are reserved for further control frames. +) + +// header represents a WebSocket frame header. +// See https://tools.ietf.org/html/rfc6455#section-5.2. +type header struct { + fin bool + rsv1 bool + rsv2 bool + rsv3 bool + opcode opcode + + payloadLength int64 + + masked bool + maskKey uint32 +} + +// readFrameHeader reads a header from the reader. +// See https://tools.ietf.org/html/rfc6455#section-5.2. 
+func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) { + defer errd.Wrap(&err, "failed to read frame header") + + b, err := r.ReadByte() + if err != nil { + return header{}, err + } + + h.fin = b&(1<<7) != 0 + h.rsv1 = b&(1<<6) != 0 + h.rsv2 = b&(1<<5) != 0 + h.rsv3 = b&(1<<4) != 0 + + h.opcode = opcode(b & 0xf) + + b, err = r.ReadByte() + if err != nil { + return header{}, err + } + + h.masked = b&(1<<7) != 0 + + payloadLength := b &^ (1 << 7) + switch { + case payloadLength < 126: + h.payloadLength = int64(payloadLength) + case payloadLength == 126: + _, err = io.ReadFull(r, readBuf[:2]) + h.payloadLength = int64(binary.BigEndian.Uint16(readBuf)) + case payloadLength == 127: + _, err = io.ReadFull(r, readBuf) + h.payloadLength = int64(binary.BigEndian.Uint64(readBuf)) + } + if err != nil { + return header{}, err + } + + if h.payloadLength < 0 { + return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength) + } + + if h.masked { + _, err = io.ReadFull(r, readBuf[:4]) + if err != nil { + return header{}, err + } + h.maskKey = binary.LittleEndian.Uint32(readBuf) + } + + return h, nil +} + +// maxControlPayload is the maximum length of a control frame payload. +// See https://tools.ietf.org/html/rfc6455#section-5.5. +const maxControlPayload = 125 + +// writeFrameHeader writes the bytes of the header to w. 
+// See https://tools.ietf.org/html/rfc6455#section-5.2 +func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) { + defer errd.Wrap(&err, "failed to write frame header") + + var b byte + if h.fin { + b |= 1 << 7 + } + if h.rsv1 { + b |= 1 << 6 + } + if h.rsv2 { + b |= 1 << 5 + } + if h.rsv3 { + b |= 1 << 4 + } + + b |= byte(h.opcode) + + err = w.WriteByte(b) + if err != nil { + return err + } + + lengthByte := byte(0) + if h.masked { + lengthByte |= 1 << 7 + } + + switch { + case h.payloadLength > math.MaxUint16: + lengthByte |= 127 + case h.payloadLength > 125: + lengthByte |= 126 + case h.payloadLength >= 0: + lengthByte |= byte(h.payloadLength) + } + err = w.WriteByte(lengthByte) + if err != nil { + return err + } + + switch { + case h.payloadLength > math.MaxUint16: + binary.BigEndian.PutUint64(buf, uint64(h.payloadLength)) + _, err = w.Write(buf) + case h.payloadLength > 125: + binary.BigEndian.PutUint16(buf, uint16(h.payloadLength)) + _, err = w.Write(buf[:2]) + } + if err != nil { + return err + } + + if h.masked { + binary.LittleEndian.PutUint32(buf, h.maskKey) + _, err = w.Write(buf[:4]) + if err != nil { + return err + } + } + + return nil +} + +// mask applies the WebSocket masking algorithm to p +// with the given key. +// See https://tools.ietf.org/html/rfc6455#section-5.3 +// +// The returned value is the correctly rotated key to +// to continue to mask/unmask the message. +// +// It is optimized for LittleEndian and expects the key +// to be in little endian. +// +// See https://github.com/golang/go/issues/31586 +func mask(key uint32, b []byte) uint32 { + if len(b) >= 8 { + key64 := uint64(key)<<32 | uint64(key) + + // At some point in the future we can clean these unrolled loops up. + // See https://github.com/golang/go/issues/31586#issuecomment-487436401 + + // Then we xor until b is less than 128 bytes. 
+ for len(b) >= 128 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + v = binary.LittleEndian.Uint64(b[16:24]) + binary.LittleEndian.PutUint64(b[16:24], v^key64) + v = binary.LittleEndian.Uint64(b[24:32]) + binary.LittleEndian.PutUint64(b[24:32], v^key64) + v = binary.LittleEndian.Uint64(b[32:40]) + binary.LittleEndian.PutUint64(b[32:40], v^key64) + v = binary.LittleEndian.Uint64(b[40:48]) + binary.LittleEndian.PutUint64(b[40:48], v^key64) + v = binary.LittleEndian.Uint64(b[48:56]) + binary.LittleEndian.PutUint64(b[48:56], v^key64) + v = binary.LittleEndian.Uint64(b[56:64]) + binary.LittleEndian.PutUint64(b[56:64], v^key64) + v = binary.LittleEndian.Uint64(b[64:72]) + binary.LittleEndian.PutUint64(b[64:72], v^key64) + v = binary.LittleEndian.Uint64(b[72:80]) + binary.LittleEndian.PutUint64(b[72:80], v^key64) + v = binary.LittleEndian.Uint64(b[80:88]) + binary.LittleEndian.PutUint64(b[80:88], v^key64) + v = binary.LittleEndian.Uint64(b[88:96]) + binary.LittleEndian.PutUint64(b[88:96], v^key64) + v = binary.LittleEndian.Uint64(b[96:104]) + binary.LittleEndian.PutUint64(b[96:104], v^key64) + v = binary.LittleEndian.Uint64(b[104:112]) + binary.LittleEndian.PutUint64(b[104:112], v^key64) + v = binary.LittleEndian.Uint64(b[112:120]) + binary.LittleEndian.PutUint64(b[112:120], v^key64) + v = binary.LittleEndian.Uint64(b[120:128]) + binary.LittleEndian.PutUint64(b[120:128], v^key64) + b = b[128:] + } + + // Then we xor until b is less than 64 bytes. 
+ for len(b) >= 64 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + v = binary.LittleEndian.Uint64(b[16:24]) + binary.LittleEndian.PutUint64(b[16:24], v^key64) + v = binary.LittleEndian.Uint64(b[24:32]) + binary.LittleEndian.PutUint64(b[24:32], v^key64) + v = binary.LittleEndian.Uint64(b[32:40]) + binary.LittleEndian.PutUint64(b[32:40], v^key64) + v = binary.LittleEndian.Uint64(b[40:48]) + binary.LittleEndian.PutUint64(b[40:48], v^key64) + v = binary.LittleEndian.Uint64(b[48:56]) + binary.LittleEndian.PutUint64(b[48:56], v^key64) + v = binary.LittleEndian.Uint64(b[56:64]) + binary.LittleEndian.PutUint64(b[56:64], v^key64) + b = b[64:] + } + + // Then we xor until b is less than 32 bytes. + for len(b) >= 32 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + v = binary.LittleEndian.Uint64(b[16:24]) + binary.LittleEndian.PutUint64(b[16:24], v^key64) + v = binary.LittleEndian.Uint64(b[24:32]) + binary.LittleEndian.PutUint64(b[24:32], v^key64) + b = b[32:] + } + + // Then we xor until b is less than 16 bytes. + for len(b) >= 16 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + b = b[16:] + } + + // Then we xor until b is less than 8 bytes. + for len(b) >= 8 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + b = b[8:] + } + } + + // Then we xor until b is less than 4 bytes. + for len(b) >= 4 { + v := binary.LittleEndian.Uint32(b) + binary.LittleEndian.PutUint32(b, v^key) + b = b[4:] + } + + // xor remaining bytes. 
+ for i := range b { + b[i] ^= byte(key) + key = bits.RotateLeft32(key, -8) + } + + return key +} diff --git a/vendor/nhooyr.io/websocket/internal/bpool/bpool.go b/vendor/nhooyr.io/websocket/internal/bpool/bpool.go new file mode 100644 index 00000000..aa826fba --- /dev/null +++ b/vendor/nhooyr.io/websocket/internal/bpool/bpool.go @@ -0,0 +1,24 @@ +package bpool + +import ( + "bytes" + "sync" +) + +var bpool sync.Pool + +// Get returns a buffer from the pool or creates a new one if +// the pool is empty. +func Get() *bytes.Buffer { + b := bpool.Get() + if b == nil { + return &bytes.Buffer{} + } + return b.(*bytes.Buffer) +} + +// Put returns a buffer into the pool. +func Put(b *bytes.Buffer) { + b.Reset() + bpool.Put(b) +} diff --git a/vendor/nhooyr.io/websocket/internal/errd/wrap.go b/vendor/nhooyr.io/websocket/internal/errd/wrap.go new file mode 100644 index 00000000..6e779131 --- /dev/null +++ b/vendor/nhooyr.io/websocket/internal/errd/wrap.go @@ -0,0 +1,14 @@ +package errd + +import ( + "fmt" +) + +// Wrap wraps err with fmt.Errorf if err is non nil. +// Intended for use with defer and a named error return. +// Inspired by https://github.com/golang/go/issues/32676. +func Wrap(err *error, f string, v ...interface{}) { + if *err != nil { + *err = fmt.Errorf(f+": %w", append(v, *err)...) + } +} diff --git a/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go new file mode 100644 index 00000000..26ffb456 --- /dev/null +++ b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go @@ -0,0 +1,170 @@ +// +build js + +// Package wsjs implements typed access to the browser javascript WebSocket API. 
+// +// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket +package wsjs + +import ( + "syscall/js" +) + +func handleJSError(err *error, onErr func()) { + r := recover() + + if jsErr, ok := r.(js.Error); ok { + *err = jsErr + + if onErr != nil { + onErr() + } + return + } + + if r != nil { + panic(r) + } +} + +// New is a wrapper around the javascript WebSocket constructor. +func New(url string, protocols []string) (c WebSocket, err error) { + defer handleJSError(&err, func() { + c = WebSocket{} + }) + + jsProtocols := make([]interface{}, len(protocols)) + for i, p := range protocols { + jsProtocols[i] = p + } + + c = WebSocket{ + v: js.Global().Get("WebSocket").New(url, jsProtocols), + } + + c.setBinaryType("arraybuffer") + + return c, nil +} + +// WebSocket is a wrapper around a javascript WebSocket object. +type WebSocket struct { + v js.Value +} + +func (c WebSocket) setBinaryType(typ string) { + c.v.Set("binaryType", string(typ)) +} + +func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() { + f := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + fn(args[0]) + return nil + }) + c.v.Call("addEventListener", eventType, f) + + return func() { + c.v.Call("removeEventListener", eventType, f) + f.Release() + } +} + +// CloseEvent is the type passed to a WebSocket close handler. +type CloseEvent struct { + Code uint16 + Reason string + WasClean bool +} + +// OnClose registers a function to be called when the WebSocket is closed. +func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) { + return c.addEventListener("close", func(e js.Value) { + ce := CloseEvent{ + Code: uint16(e.Get("code").Int()), + Reason: e.Get("reason").String(), + WasClean: e.Get("wasClean").Bool(), + } + fn(ce) + }) +} + +// OnError registers a function to be called when there is an error +// with the WebSocket. 
+func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) { + return c.addEventListener("error", fn) +} + +// MessageEvent is the type passed to a message handler. +type MessageEvent struct { + // string or []byte. + Data interface{} + + // There are more fields to the interface but we don't use them. + // See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent +} + +// OnMessage registers a function to be called when the WebSocket receives a message. +func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) { + return c.addEventListener("message", func(e js.Value) { + var data interface{} + + arrayBuffer := e.Get("data") + if arrayBuffer.Type() == js.TypeString { + data = arrayBuffer.String() + } else { + data = extractArrayBuffer(arrayBuffer) + } + + me := MessageEvent{ + Data: data, + } + fn(me) + + return + }) +} + +// Subprotocol returns the WebSocket subprotocol in use. +func (c WebSocket) Subprotocol() string { + return c.v.Get("protocol").String() +} + +// OnOpen registers a function to be called when the WebSocket is opened. +func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) { + return c.addEventListener("open", fn) +} + +// Close closes the WebSocket with the given code and reason. +func (c WebSocket) Close(code int, reason string) (err error) { + defer handleJSError(&err, nil) + c.v.Call("close", code, reason) + return err +} + +// SendText sends the given string as a text message +// on the WebSocket. +func (c WebSocket) SendText(v string) (err error) { + defer handleJSError(&err, nil) + c.v.Call("send", v) + return err +} + +// SendBytes sends the given message as a binary message +// on the WebSocket. 
+func (c WebSocket) SendBytes(v []byte) (err error) { + defer handleJSError(&err, nil) + c.v.Call("send", uint8Array(v)) + return err +} + +func extractArrayBuffer(arrayBuffer js.Value) []byte { + uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer) + dst := make([]byte, uint8Array.Length()) + js.CopyBytesToGo(dst, uint8Array) + return dst +} + +func uint8Array(src []byte) js.Value { + uint8Array := js.Global().Get("Uint8Array").New(len(src)) + js.CopyBytesToJS(uint8Array, src) + return uint8Array +} diff --git a/vendor/nhooyr.io/websocket/internal/xsync/go.go b/vendor/nhooyr.io/websocket/internal/xsync/go.go new file mode 100644 index 00000000..7a61f27f --- /dev/null +++ b/vendor/nhooyr.io/websocket/internal/xsync/go.go @@ -0,0 +1,25 @@ +package xsync + +import ( + "fmt" +) + +// Go allows running a function in another goroutine +// and waiting for its error. +func Go(fn func() error) <-chan error { + errs := make(chan error, 1) + go func() { + defer func() { + r := recover() + if r != nil { + select { + case errs <- fmt.Errorf("panic in go fn: %v", r): + default: + } + } + }() + errs <- fn() + }() + + return errs +} diff --git a/vendor/nhooyr.io/websocket/internal/xsync/int64.go b/vendor/nhooyr.io/websocket/internal/xsync/int64.go new file mode 100644 index 00000000..a0c40204 --- /dev/null +++ b/vendor/nhooyr.io/websocket/internal/xsync/int64.go @@ -0,0 +1,23 @@ +package xsync + +import ( + "sync/atomic" +) + +// Int64 represents an atomic int64. +type Int64 struct { + // We do not use atomic.Load/StoreInt64 since it does not + // work on 32 bit computers but we need 64 bit integers. + i atomic.Value +} + +// Load loads the int64. +func (v *Int64) Load() int64 { + i, _ := v.i.Load().(int64) + return i +} + +// Store stores the int64. 
+func (v *Int64) Store(i int64) { + v.i.Store(i) +} diff --git a/vendor/nhooyr.io/websocket/netconn.go b/vendor/nhooyr.io/websocket/netconn.go new file mode 100644 index 00000000..64aadf0b --- /dev/null +++ b/vendor/nhooyr.io/websocket/netconn.go @@ -0,0 +1,166 @@ +package websocket + +import ( + "context" + "fmt" + "io" + "math" + "net" + "sync" + "time" +) + +// NetConn converts a *websocket.Conn into a net.Conn. +// +// It's for tunneling arbitrary protocols over WebSockets. +// Few users of the library will need this but it's tricky to implement +// correctly and so provided in the library. +// See https://github.com/nhooyr/websocket/issues/100. +// +// Every Write to the net.Conn will correspond to a message write of +// the given type on *websocket.Conn. +// +// The passed ctx bounds the lifetime of the net.Conn. If cancelled, +// all reads and writes on the net.Conn will be cancelled. +// +// If a message is read that is not of the correct type, the connection +// will be closed with StatusUnsupportedData and an error will be returned. +// +// Close will close the *websocket.Conn with StatusNormalClosure. +// +// When a deadline is hit, the connection will be closed. This is +// different from most net.Conn implementations where only the +// reading/writing goroutines are interrupted but the connection is kept alive. +// +// The Addr methods will return a mock net.Addr that returns "websocket" for Network +// and "websocket/unknown-addr" for String. +// +// A received StatusNormalClosure or StatusGoingAway close frame will be translated to +// io.EOF when reading. 
+func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn { + nc := &netConn{ + c: c, + msgType: msgType, + } + + var cancel context.CancelFunc + nc.writeContext, cancel = context.WithCancel(ctx) + nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel) + if !nc.writeTimer.Stop() { + <-nc.writeTimer.C + } + + nc.readContext, cancel = context.WithCancel(ctx) + nc.readTimer = time.AfterFunc(math.MaxInt64, cancel) + if !nc.readTimer.Stop() { + <-nc.readTimer.C + } + + return nc +} + +type netConn struct { + c *Conn + msgType MessageType + + writeTimer *time.Timer + writeContext context.Context + + readTimer *time.Timer + readContext context.Context + + readMu sync.Mutex + eofed bool + reader io.Reader +} + +var _ net.Conn = &netConn{} + +func (c *netConn) Close() error { + return c.c.Close(StatusNormalClosure, "") +} + +func (c *netConn) Write(p []byte) (int, error) { + err := c.c.Write(c.writeContext, c.msgType, p) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (c *netConn) Read(p []byte) (int, error) { + c.readMu.Lock() + defer c.readMu.Unlock() + + if c.eofed { + return 0, io.EOF + } + + if c.reader == nil { + typ, r, err := c.c.Reader(c.readContext) + if err != nil { + switch CloseStatus(err) { + case StatusNormalClosure, StatusGoingAway: + c.eofed = true + return 0, io.EOF + } + return 0, err + } + if typ != c.msgType { + err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ) + c.c.Close(StatusUnsupportedData, err.Error()) + return 0, err + } + c.reader = r + } + + n, err := c.reader.Read(p) + if err == io.EOF { + c.reader = nil + err = nil + } + return n, err +} + +type websocketAddr struct { +} + +func (a websocketAddr) Network() string { + return "websocket" +} + +func (a websocketAddr) String() string { + return "websocket/unknown-addr" +} + +func (c *netConn) RemoteAddr() net.Addr { + return websocketAddr{} +} + +func (c *netConn) LocalAddr() net.Addr { + return websocketAddr{} +} + +func (c 
*netConn) SetDeadline(t time.Time) error { + c.SetWriteDeadline(t) + c.SetReadDeadline(t) + return nil +} + +func (c *netConn) SetWriteDeadline(t time.Time) error { + if t.IsZero() { + c.writeTimer.Stop() + } else { + c.writeTimer.Reset(t.Sub(time.Now())) + } + return nil +} + +func (c *netConn) SetReadDeadline(t time.Time) error { + if t.IsZero() { + c.readTimer.Stop() + } else { + c.readTimer.Reset(t.Sub(time.Now())) + } + return nil +} diff --git a/vendor/nhooyr.io/websocket/read.go b/vendor/nhooyr.io/websocket/read.go new file mode 100644 index 00000000..ae05cf93 --- /dev/null +++ b/vendor/nhooyr.io/websocket/read.go @@ -0,0 +1,474 @@ +// +build !js + +package websocket + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "nhooyr.io/websocket/internal/errd" + "nhooyr.io/websocket/internal/xsync" +) + +// Reader reads from the connection until until there is a WebSocket +// data message to be read. It will handle ping, pong and close frames as appropriate. +// +// It returns the type of the message and an io.Reader to read it. +// The passed context will also bound the reader. +// Ensure you read to EOF otherwise the connection will hang. +// +// Call CloseRead if you do not expect any data messages from the peer. +// +// Only one Reader may be open at a time. +func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) { + return c.reader(ctx) +} + +// Read is a convenience method around Reader to read a single message +// from the connection. +func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) { + typ, r, err := c.Reader(ctx) + if err != nil { + return 0, nil, err + } + + b, err := ioutil.ReadAll(r) + return typ, b, err +} + +// CloseRead starts a goroutine to read from the connection until it is closed +// or a data message is received. +// +// Once CloseRead is called you cannot read any messages from the connection. 
+// The returned context will be cancelled when the connection is closed. +// +// If a data message is received, the connection will be closed with StatusPolicyViolation. +// +// Call CloseRead when you do not expect to read any more messages. +// Since it actively reads from the connection, it will ensure that ping, pong and close +// frames are responded to. This means c.Ping and c.Close will still work as expected. +func (c *Conn) CloseRead(ctx context.Context) context.Context { + ctx, cancel := context.WithCancel(ctx) + go func() { + defer cancel() + c.Reader(ctx) + c.Close(StatusPolicyViolation, "unexpected data message") + }() + return ctx +} + +// SetReadLimit sets the max number of bytes to read for a single message. +// It applies to the Reader and Read methods. +// +// By default, the connection has a message read limit of 32768 bytes. +// +// When the limit is hit, the connection will be closed with StatusMessageTooBig. +func (c *Conn) SetReadLimit(n int64) { + // We add read one more byte than the limit in case + // there is a fin frame that needs to be read. 
+ c.msgReader.limitReader.limit.Store(n + 1) +} + +const defaultReadLimit = 32768 + +func newMsgReader(c *Conn) *msgReader { + mr := &msgReader{ + c: c, + fin: true, + } + mr.readFunc = mr.read + + mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1) + return mr +} + +func (mr *msgReader) resetFlate() { + if mr.flateContextTakeover() { + mr.dict.init(32768) + } + if mr.flateBufio == nil { + mr.flateBufio = getBufioReader(mr.readFunc) + } + + mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf) + mr.limitReader.r = mr.flateReader + mr.flateTail.Reset(deflateMessageTail) +} + +func (mr *msgReader) putFlateReader() { + if mr.flateReader != nil { + putFlateReader(mr.flateReader) + mr.flateReader = nil + } +} + +func (mr *msgReader) close() { + mr.c.readMu.forceLock() + mr.putFlateReader() + mr.dict.close() + if mr.flateBufio != nil { + putBufioReader(mr.flateBufio) + } + + if mr.c.client { + putBufioReader(mr.c.br) + mr.c.br = nil + } +} + +func (mr *msgReader) flateContextTakeover() bool { + if mr.c.client { + return !mr.c.copts.serverNoContextTakeover + } + return !mr.c.copts.clientNoContextTakeover +} + +func (c *Conn) readRSV1Illegal(h header) bool { + // If compression is disabled, rsv1 is illegal. + if !c.flate() { + return true + } + // rsv1 is only allowed on data frames beginning messages. 
+ if h.opcode != opText && h.opcode != opBinary { + return true + } + return false +} + +func (c *Conn) readLoop(ctx context.Context) (header, error) { + for { + h, err := c.readFrameHeader(ctx) + if err != nil { + return header{}, err + } + + if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 { + err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3) + c.writeError(StatusProtocolError, err) + return header{}, err + } + + if !c.client && !h.masked { + return header{}, errors.New("received unmasked frame from client") + } + + switch h.opcode { + case opClose, opPing, opPong: + err = c.handleControl(ctx, h) + if err != nil { + // Pass through CloseErrors when receiving a close frame. + if h.opcode == opClose && CloseStatus(err) != -1 { + return header{}, err + } + return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err) + } + case opContinuation, opText, opBinary: + return h, nil + default: + err := fmt.Errorf("received unknown opcode %v", h.opcode) + c.writeError(StatusProtocolError, err) + return header{}, err + } + } +} + +func (c *Conn) readFrameHeader(ctx context.Context) (header, error) { + select { + case <-c.closed: + return header{}, c.closeErr + case c.readTimeout <- ctx: + } + + h, err := readFrameHeader(c.br, c.readHeaderBuf[:]) + if err != nil { + select { + case <-c.closed: + return header{}, c.closeErr + case <-ctx.Done(): + return header{}, ctx.Err() + default: + c.close(err) + return header{}, err + } + } + + select { + case <-c.closed: + return header{}, c.closeErr + case c.readTimeout <- context.Background(): + } + + return h, nil +} + +func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) { + select { + case <-c.closed: + return 0, c.closeErr + case c.readTimeout <- ctx: + } + + n, err := io.ReadFull(c.br, p) + if err != nil { + select { + case <-c.closed: + return n, c.closeErr + case <-ctx.Done(): + return n, ctx.Err() + default: + err = 
fmt.Errorf("failed to read frame payload: %w", err) + c.close(err) + return n, err + } + } + + select { + case <-c.closed: + return n, c.closeErr + case c.readTimeout <- context.Background(): + } + + return n, err +} + +func (c *Conn) handleControl(ctx context.Context, h header) (err error) { + if h.payloadLength < 0 || h.payloadLength > maxControlPayload { + err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength) + c.writeError(StatusProtocolError, err) + return err + } + + if !h.fin { + err := errors.New("received fragmented control frame") + c.writeError(StatusProtocolError, err) + return err + } + + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + + b := c.readControlBuf[:h.payloadLength] + _, err = c.readFramePayload(ctx, b) + if err != nil { + return err + } + + if h.masked { + mask(h.maskKey, b) + } + + switch h.opcode { + case opPing: + return c.writeControl(ctx, opPong, b) + case opPong: + c.activePingsMu.Lock() + pong, ok := c.activePings[string(b)] + c.activePingsMu.Unlock() + if ok { + select { + case pong <- struct{}{}: + default: + } + } + return nil + } + + defer func() { + c.readCloseFrameErr = err + }() + + ce, err := parseClosePayload(b) + if err != nil { + err = fmt.Errorf("received invalid close payload: %w", err) + c.writeError(StatusProtocolError, err) + return err + } + + err = fmt.Errorf("received close frame: %w", ce) + c.setCloseErr(err) + c.writeClose(ce.Code, ce.Reason) + c.close(err) + return err +} + +func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) { + defer errd.Wrap(&err, "failed to get reader") + + err = c.readMu.lock(ctx) + if err != nil { + return 0, nil, err + } + defer c.readMu.unlock() + + if !c.msgReader.fin { + err = errors.New("previous message not read to completion") + c.close(fmt.Errorf("failed to get reader: %w", err)) + return 0, nil, err + } + + h, err := c.readLoop(ctx) + if err != nil { + return 0, nil, err + } + + if 
h.opcode == opContinuation { + err := errors.New("received continuation frame without text or binary frame") + c.writeError(StatusProtocolError, err) + return 0, nil, err + } + + c.msgReader.reset(ctx, h) + + return MessageType(h.opcode), c.msgReader, nil +} + +type msgReader struct { + c *Conn + + ctx context.Context + flate bool + flateReader io.Reader + flateBufio *bufio.Reader + flateTail strings.Reader + limitReader *limitReader + dict slidingWindow + + fin bool + payloadLength int64 + maskKey uint32 + + // readerFunc(mr.Read) to avoid continuous allocations. + readFunc readerFunc +} + +func (mr *msgReader) reset(ctx context.Context, h header) { + mr.ctx = ctx + mr.flate = h.rsv1 + mr.limitReader.reset(mr.readFunc) + + if mr.flate { + mr.resetFlate() + } + + mr.setFrame(h) +} + +func (mr *msgReader) setFrame(h header) { + mr.fin = h.fin + mr.payloadLength = h.payloadLength + mr.maskKey = h.maskKey +} + +func (mr *msgReader) Read(p []byte) (n int, err error) { + err = mr.c.readMu.lock(mr.ctx) + if err != nil { + return 0, fmt.Errorf("failed to read: %w", err) + } + defer mr.c.readMu.unlock() + + n, err = mr.limitReader.Read(p) + if mr.flate && mr.flateContextTakeover() { + p = p[:n] + mr.dict.write(p) + } + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate { + mr.putFlateReader() + return n, io.EOF + } + if err != nil { + err = fmt.Errorf("failed to read: %w", err) + mr.c.close(err) + } + return n, err +} + +func (mr *msgReader) read(p []byte) (int, error) { + for { + if mr.payloadLength == 0 { + if mr.fin { + if mr.flate { + return mr.flateTail.Read(p) + } + return 0, io.EOF + } + + h, err := mr.c.readLoop(mr.ctx) + if err != nil { + return 0, err + } + if h.opcode != opContinuation { + err := errors.New("received new data message without finishing the previous message") + mr.c.writeError(StatusProtocolError, err) + return 0, err + } + mr.setFrame(h) + + continue + } + + if int64(len(p)) > mr.payloadLength { + p = 
p[:mr.payloadLength] + } + + n, err := mr.c.readFramePayload(mr.ctx, p) + if err != nil { + return n, err + } + + mr.payloadLength -= int64(n) + + if !mr.c.client { + mr.maskKey = mask(mr.maskKey, p) + } + + return n, nil + } +} + +type limitReader struct { + c *Conn + r io.Reader + limit xsync.Int64 + n int64 +} + +func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader { + lr := &limitReader{ + c: c, + } + lr.limit.Store(limit) + lr.reset(r) + return lr +} + +func (lr *limitReader) reset(r io.Reader) { + lr.n = lr.limit.Load() + lr.r = r +} + +func (lr *limitReader) Read(p []byte) (int, error) { + if lr.n <= 0 { + err := fmt.Errorf("read limited at %v bytes", lr.limit.Load()) + lr.c.writeError(StatusMessageTooBig, err) + return 0, err + } + + if int64(len(p)) > lr.n { + p = p[:lr.n] + } + n, err := lr.r.Read(p) + lr.n -= int64(n) + return n, err +} + +type readerFunc func(p []byte) (int, error) + +func (f readerFunc) Read(p []byte) (int, error) { + return f(p) +} diff --git a/vendor/nhooyr.io/websocket/stringer.go b/vendor/nhooyr.io/websocket/stringer.go new file mode 100644 index 00000000..5a66ba29 --- /dev/null +++ b/vendor/nhooyr.io/websocket/stringer.go @@ -0,0 +1,91 @@ +// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT. + +package websocket + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[opContinuation-0] + _ = x[opText-1] + _ = x[opBinary-2] + _ = x[opClose-8] + _ = x[opPing-9] + _ = x[opPong-10] +} + +const ( + _opcode_name_0 = "opContinuationopTextopBinary" + _opcode_name_1 = "opCloseopPingopPong" +) + +var ( + _opcode_index_0 = [...]uint8{0, 14, 20, 28} + _opcode_index_1 = [...]uint8{0, 7, 13, 19} +) + +func (i opcode) String() string { + switch { + case 0 <= i && i <= 2: + return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]] + case 8 <= i && i <= 10: + i -= 8 + return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]] + default: + return "opcode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[MessageText-1] + _ = x[MessageBinary-2] +} + +const _MessageType_name = "MessageTextMessageBinary" + +var _MessageType_index = [...]uint8{0, 11, 24} + +func (i MessageType) String() string { + i -= 1 + if i < 0 || i >= MessageType(len(_MessageType_index)-1) { + return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[StatusNormalClosure-1000] + _ = x[StatusGoingAway-1001] + _ = x[StatusProtocolError-1002] + _ = x[StatusUnsupportedData-1003] + _ = x[statusReserved-1004] + _ = x[StatusNoStatusRcvd-1005] + _ = x[StatusAbnormalClosure-1006] + _ = x[StatusInvalidFramePayloadData-1007] + _ = x[StatusPolicyViolation-1008] + _ = x[StatusMessageTooBig-1009] + _ = x[StatusMandatoryExtension-1010] + _ = x[StatusInternalError-1011] + _ = x[StatusServiceRestart-1012] + _ = x[StatusTryAgainLater-1013] + _ = x[StatusBadGateway-1014] + _ = x[StatusTLSHandshake-1015] +} + +const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake" + +var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312} + +func (i StatusCode) String() string { + i -= 1000 + if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) { + return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")" + } + return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]] +} diff --git a/vendor/nhooyr.io/websocket/write.go b/vendor/nhooyr.io/websocket/write.go new file mode 100644 index 00000000..2210cf81 --- /dev/null +++ b/vendor/nhooyr.io/websocket/write.go @@ -0,0 +1,397 @@ +// +build !js + +package websocket + +import ( + "bufio" + "context" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "time" + + "github.com/klauspost/compress/flate" + + "nhooyr.io/websocket/internal/errd" +) + +// Writer returns a writer bounded by the context that will write +// a WebSocket message of type dataType to the connection. +// +// You must close the writer once you have written the entire message. 
+// +// Only one writer can be open at a time, multiple calls will block until the previous writer +// is closed. +func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { + w, err := c.writer(ctx, typ) + if err != nil { + return nil, fmt.Errorf("failed to get writer: %w", err) + } + return w, nil +} + +// Write writes a message to the connection. +// +// See the Writer method if you want to stream a message. +// +// If compression is disabled or the threshold is not met, then it +// will write the message in a single frame. +func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error { + _, err := c.write(ctx, typ, p) + if err != nil { + return fmt.Errorf("failed to write msg: %w", err) + } + return nil +} + +type msgWriter struct { + mw *msgWriterState + closed bool +} + +func (mw *msgWriter) Write(p []byte) (int, error) { + if mw.closed { + return 0, errors.New("cannot use closed writer") + } + return mw.mw.Write(p) +} + +func (mw *msgWriter) Close() error { + if mw.closed { + return errors.New("cannot use closed writer") + } + mw.closed = true + return mw.mw.Close() +} + +type msgWriterState struct { + c *Conn + + mu *mu + writeMu *mu + + ctx context.Context + opcode opcode + flate bool + + trimWriter *trimLastFourBytesWriter + dict slidingWindow +} + +func newMsgWriterState(c *Conn) *msgWriterState { + mw := &msgWriterState{ + c: c, + mu: newMu(c), + writeMu: newMu(c), + } + return mw +} + +func (mw *msgWriterState) ensureFlate() { + if mw.trimWriter == nil { + mw.trimWriter = &trimLastFourBytesWriter{ + w: writerFunc(mw.write), + } + } + + mw.dict.init(8192) + mw.flate = true +} + +func (mw *msgWriterState) flateContextTakeover() bool { + if mw.c.client { + return !mw.c.copts.clientNoContextTakeover + } + return !mw.c.copts.serverNoContextTakeover +} + +func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { + err := c.msgWriterState.reset(ctx, typ) + if err != nil { + return nil, err + 
} + return &msgWriter{ + mw: c.msgWriterState, + closed: false, + }, nil +} + +func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) { + mw, err := c.writer(ctx, typ) + if err != nil { + return 0, err + } + + if !c.flate() { + defer c.msgWriterState.mu.unlock() + return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p) + } + + n, err := mw.Write(p) + if err != nil { + return n, err + } + + err = mw.Close() + return n, err +} + +func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error { + err := mw.mu.lock(ctx) + if err != nil { + return err + } + + mw.ctx = ctx + mw.opcode = opcode(typ) + mw.flate = false + + mw.trimWriter.reset() + + return nil +} + +// Write writes the given bytes to the WebSocket connection. +func (mw *msgWriterState) Write(p []byte) (_ int, err error) { + err = mw.writeMu.lock(mw.ctx) + if err != nil { + return 0, fmt.Errorf("failed to write: %w", err) + } + defer mw.writeMu.unlock() + + defer func() { + if err != nil { + err = fmt.Errorf("failed to write: %w", err) + mw.c.close(err) + } + }() + + if mw.c.flate() { + // Only enables flate if the length crosses the + // threshold on the first frame + if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold { + mw.ensureFlate() + } + } + + if mw.flate { + err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf) + if err != nil { + return 0, err + } + mw.dict.write(p) + return len(p), nil + } + + return mw.write(p) +} + +func (mw *msgWriterState) write(p []byte) (int, error) { + n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p) + if err != nil { + return n, fmt.Errorf("failed to write data frame: %w", err) + } + mw.opcode = opContinuation + return n, nil +} + +// Close flushes the frame to the connection. 
+func (mw *msgWriterState) Close() (err error) { + defer errd.Wrap(&err, "failed to close writer") + + err = mw.writeMu.lock(mw.ctx) + if err != nil { + return err + } + defer mw.writeMu.unlock() + + _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil) + if err != nil { + return fmt.Errorf("failed to write fin frame: %w", err) + } + + if mw.flate && !mw.flateContextTakeover() { + mw.dict.close() + } + mw.mu.unlock() + return nil +} + +func (mw *msgWriterState) close() { + if mw.c.client { + mw.c.writeFrameMu.forceLock() + putBufioWriter(mw.c.bw) + } + + mw.writeMu.forceLock() + mw.dict.close() +} + +func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error { + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + + _, err := c.writeFrame(ctx, true, false, opcode, p) + if err != nil { + return fmt.Errorf("failed to write control frame %v: %w", opcode, err) + } + return nil +} + +// frame handles all writes to the connection. +func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) { + err = c.writeFrameMu.lock(ctx) + if err != nil { + return 0, err + } + defer c.writeFrameMu.unlock() + + // If the state says a close has already been written, we wait until + // the connection is closed and return that error. + // + // However, if the frame being written is a close, that means its the close from + // the state being set so we let it go through. 
+ c.closeMu.Lock() + wroteClose := c.wroteClose + c.closeMu.Unlock() + if wroteClose && opcode != opClose { + select { + case <-ctx.Done(): + return 0, ctx.Err() + case <-c.closed: + return 0, c.closeErr + } + } + + select { + case <-c.closed: + return 0, c.closeErr + case c.writeTimeout <- ctx: + } + + defer func() { + if err != nil { + select { + case <-c.closed: + err = c.closeErr + case <-ctx.Done(): + err = ctx.Err() + } + c.close(err) + err = fmt.Errorf("failed to write frame: %w", err) + } + }() + + c.writeHeader.fin = fin + c.writeHeader.opcode = opcode + c.writeHeader.payloadLength = int64(len(p)) + + if c.client { + c.writeHeader.masked = true + _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4]) + if err != nil { + return 0, fmt.Errorf("failed to generate masking key: %w", err) + } + c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:]) + } + + c.writeHeader.rsv1 = false + if flate && (opcode == opText || opcode == opBinary) { + c.writeHeader.rsv1 = true + } + + err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:]) + if err != nil { + return 0, err + } + + n, err := c.writeFramePayload(p) + if err != nil { + return n, err + } + + if c.writeHeader.fin { + err = c.bw.Flush() + if err != nil { + return n, fmt.Errorf("failed to flush: %w", err) + } + } + + select { + case <-c.closed: + return n, c.closeErr + case c.writeTimeout <- context.Background(): + } + + return n, nil +} + +func (c *Conn) writeFramePayload(p []byte) (n int, err error) { + defer errd.Wrap(&err, "failed to write frame payload") + + if !c.writeHeader.masked { + return c.bw.Write(p) + } + + maskKey := c.writeHeader.maskKey + for len(p) > 0 { + // If the buffer is full, we need to flush. + if c.bw.Available() == 0 { + err = c.bw.Flush() + if err != nil { + return n, err + } + } + + // Start of next write in the buffer. 
+ i := c.bw.Buffered() + + j := len(p) + if j > c.bw.Available() { + j = c.bw.Available() + } + + _, err := c.bw.Write(p[:j]) + if err != nil { + return n, err + } + + maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()]) + + p = p[j:] + n += j + } + + return n, nil +} + +type writerFunc func(p []byte) (int, error) + +func (f writerFunc) Write(p []byte) (int, error) { + return f(p) +} + +// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer +// and returns it. +func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte { + var writeBuf []byte + bw.Reset(writerFunc(func(p2 []byte) (int, error) { + writeBuf = p2[:cap(p2)] + return len(p2), nil + })) + + bw.WriteByte(0) + bw.Flush() + + bw.Reset(w) + + return writeBuf +} + +func (c *Conn) writeError(code StatusCode, err error) { + c.setCloseErr(err) + c.writeClose(code, err.Error()) + c.close(nil) +} diff --git a/vendor/nhooyr.io/websocket/ws_js.go b/vendor/nhooyr.io/websocket/ws_js.go new file mode 100644 index 00000000..b87e32cd --- /dev/null +++ b/vendor/nhooyr.io/websocket/ws_js.go @@ -0,0 +1,379 @@ +package websocket // import "nhooyr.io/websocket" + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "syscall/js" + + "nhooyr.io/websocket/internal/bpool" + "nhooyr.io/websocket/internal/wsjs" + "nhooyr.io/websocket/internal/xsync" +) + +// Conn provides a wrapper around the browser WebSocket API. +type Conn struct { + ws wsjs.WebSocket + + // read limit for a message in bytes. 
+ msgReadLimit xsync.Int64 + + closingMu sync.Mutex + isReadClosed xsync.Int64 + closeOnce sync.Once + closed chan struct{} + closeErrOnce sync.Once + closeErr error + closeWasClean bool + + releaseOnClose func() + releaseOnMessage func() + + readSignal chan struct{} + readBufMu sync.Mutex + readBuf []wsjs.MessageEvent +} + +func (c *Conn) close(err error, wasClean bool) { + c.closeOnce.Do(func() { + runtime.SetFinalizer(c, nil) + + if !wasClean { + err = fmt.Errorf("unclean connection close: %w", err) + } + c.setCloseErr(err) + c.closeWasClean = wasClean + close(c.closed) + }) +} + +func (c *Conn) init() { + c.closed = make(chan struct{}) + c.readSignal = make(chan struct{}, 1) + + c.msgReadLimit.Store(32768) + + c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) { + err := CloseError{ + Code: StatusCode(e.Code), + Reason: e.Reason, + } + // We do not know if we sent or received this close as + // its possible the browser triggered it without us + // explicitly sending it. + c.close(err, e.WasClean) + + c.releaseOnClose() + c.releaseOnMessage() + }) + + c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) { + c.readBufMu.Lock() + defer c.readBufMu.Unlock() + + c.readBuf = append(c.readBuf, e) + + // Lets the read goroutine know there is definitely something in readBuf. + select { + case c.readSignal <- struct{}{}: + default: + } + }) + + runtime.SetFinalizer(c, func(c *Conn) { + c.setCloseErr(errors.New("connection garbage collected")) + c.closeWithInternal() + }) +} + +func (c *Conn) closeWithInternal() { + c.Close(StatusInternalError, "something went wrong") +} + +// Read attempts to read a message from the connection. +// The maximum time spent waiting is bounded by the context. 
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) { + if c.isReadClosed.Load() == 1 { + return 0, nil, errors.New("WebSocket connection read closed") + } + + typ, p, err := c.read(ctx) + if err != nil { + return 0, nil, fmt.Errorf("failed to read: %w", err) + } + if int64(len(p)) > c.msgReadLimit.Load() { + err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load()) + c.Close(StatusMessageTooBig, err.Error()) + return 0, nil, err + } + return typ, p, nil +} + +func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) { + select { + case <-ctx.Done(): + c.Close(StatusPolicyViolation, "read timed out") + return 0, nil, ctx.Err() + case <-c.readSignal: + case <-c.closed: + return 0, nil, c.closeErr + } + + c.readBufMu.Lock() + defer c.readBufMu.Unlock() + + me := c.readBuf[0] + // We copy the messages forward and decrease the size + // of the slice to avoid reallocating. + copy(c.readBuf, c.readBuf[1:]) + c.readBuf = c.readBuf[:len(c.readBuf)-1] + + if len(c.readBuf) > 0 { + // Next time we read, we'll grab the message. + select { + case c.readSignal <- struct{}{}: + default: + } + } + + switch p := me.Data.(type) { + case string: + return MessageText, []byte(p), nil + case []byte: + return MessageBinary, p, nil + default: + panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String()) + } +} + +// Ping is mocked out for Wasm. +func (c *Conn) Ping(ctx context.Context) error { + return nil +} + +// Write writes a message of the given type to the connection. +// Always non blocking. +func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error { + err := c.write(ctx, typ, p) + if err != nil { + // Have to ensure the WebSocket is closed after a write error + // to match the Go API. It can only error if the message type + // is unexpected or the passed bytes contain invalid UTF-8 for + // MessageText. 
+ err := fmt.Errorf("failed to write: %w", err) + c.setCloseErr(err) + c.closeWithInternal() + return err + } + return nil +} + +func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error { + if c.isClosed() { + return c.closeErr + } + switch typ { + case MessageBinary: + return c.ws.SendBytes(p) + case MessageText: + return c.ws.SendText(string(p)) + default: + return fmt.Errorf("unexpected message type: %v", typ) + } +} + +// Close closes the WebSocket with the given code and reason. +// It will wait until the peer responds with a close frame +// or the connection is closed. +// It thus performs the full WebSocket close handshake. +func (c *Conn) Close(code StatusCode, reason string) error { + err := c.exportedClose(code, reason) + if err != nil { + return fmt.Errorf("failed to close WebSocket: %w", err) + } + return nil +} + +func (c *Conn) exportedClose(code StatusCode, reason string) error { + c.closingMu.Lock() + defer c.closingMu.Unlock() + + ce := fmt.Errorf("sent close: %w", CloseError{ + Code: code, + Reason: reason, + }) + + if c.isClosed() { + return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr) + } + + c.setCloseErr(ce) + err := c.ws.Close(int(code), reason) + if err != nil { + return err + } + + <-c.closed + if !c.closeWasClean { + return c.closeErr + } + return nil +} + +// Subprotocol returns the negotiated subprotocol. +// An empty string means the default protocol. +func (c *Conn) Subprotocol() string { + return c.ws.Subprotocol() +} + +// DialOptions represents the options available to pass to Dial. +type DialOptions struct { + // Subprotocols lists the subprotocols to negotiate with the server. + Subprotocols []string +} + +// Dial creates a new WebSocket connection to the given url with the given options. +// The passed context bounds the maximum time spent waiting for the connection to open. +// The returned *http.Response is always nil or a mock. 
It's only in the signature +// to match the core API. +func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) { + c, resp, err := dial(ctx, url, opts) + if err != nil { + return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err) + } + return c, resp, nil +} + +func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) { + if opts == nil { + opts = &DialOptions{} + } + + url = strings.Replace(url, "http://", "ws://", 1) + url = strings.Replace(url, "https://", "wss://", 1) + + ws, err := wsjs.New(url, opts.Subprotocols) + if err != nil { + return nil, nil, err + } + + c := &Conn{ + ws: ws, + } + c.init() + + opench := make(chan struct{}) + releaseOpen := ws.OnOpen(func(e js.Value) { + close(opench) + }) + defer releaseOpen() + + select { + case <-ctx.Done(): + c.Close(StatusPolicyViolation, "dial timed out") + return nil, nil, ctx.Err() + case <-opench: + return c, &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + }, nil + case <-c.closed: + return nil, nil, c.closeErr + } +} + +// Reader attempts to read a message from the connection. +// The maximum time spent waiting is bounded by the context. +func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) { + typ, p, err := c.Read(ctx) + if err != nil { + return 0, nil, err + } + return typ, bytes.NewReader(p), nil +} + +// Writer returns a writer to write a WebSocket data message to the connection. +// It buffers the entire message in memory and then sends it when the writer +// is closed. 
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { + return writer{ + c: c, + ctx: ctx, + typ: typ, + b: bpool.Get(), + }, nil +} + +type writer struct { + closed bool + + c *Conn + ctx context.Context + typ MessageType + + b *bytes.Buffer +} + +func (w writer) Write(p []byte) (int, error) { + if w.closed { + return 0, errors.New("cannot write to closed writer") + } + n, err := w.b.Write(p) + if err != nil { + return n, fmt.Errorf("failed to write message: %w", err) + } + return n, nil +} + +func (w writer) Close() error { + if w.closed { + return errors.New("cannot close closed writer") + } + w.closed = true + defer bpool.Put(w.b) + + err := w.c.Write(w.ctx, w.typ, w.b.Bytes()) + if err != nil { + return fmt.Errorf("failed to close writer: %w", err) + } + return nil +} + +// CloseRead implements *Conn.CloseRead for wasm. +func (c *Conn) CloseRead(ctx context.Context) context.Context { + c.isReadClosed.Store(1) + + ctx, cancel := context.WithCancel(ctx) + go func() { + defer cancel() + c.read(ctx) + c.Close(StatusPolicyViolation, "unexpected data message") + }() + return ctx +} + +// SetReadLimit implements *Conn.SetReadLimit for wasm. +func (c *Conn) SetReadLimit(n int64) { + c.msgReadLimit.Store(n) +} + +func (c *Conn) setCloseErr(err error) { + c.closeErrOnce.Do(func() { + c.closeErr = fmt.Errorf("WebSocket closed: %w", err) + }) +} + +func (c *Conn) isClosed() bool { + select { + case <-c.closed: + return true + default: + return false + } +}