diff --git a/src/nvcgo/go.mod b/src/nvcgo/go.mod index 4abf86b33..7a1e89b4b 100644 --- a/src/nvcgo/go.mod +++ b/src/nvcgo/go.mod @@ -1,13 +1,11 @@ module nvcgo -go 1.17 +go 1.24.0 require ( - github.com/cilium/ebpf v0.8.0 + github.com/cilium/ebpf v0.20.0 github.com/google/uuid v1.6.0 github.com/opencontainers/runtime-spec v1.2.0 github.com/sirupsen/logrus v1.9.3 - golang.org/x/sys v0.21.0 + golang.org/x/sys v0.37.0 ) - -require golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect diff --git a/src/nvcgo/go.sum b/src/nvcgo/go.sum index 4d79ae80c..9713d655a 100644 --- a/src/nvcgo/go.sum +++ b/src/nvcgo/go.sum @@ -1,42 +1,44 @@ -github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs= -github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cilium/ebpf v0.20.0 h1:atwWj9d3NffHyPZzVlx3hmw1on5CLe9eljR8VuHTwhM= +github.com/cilium/ebpf v0.20.0/go.mod h1:pzLjFymM+uZPLk/IXZUL63xdx5VXEo+enTzxkZXdycw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s= +github.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= +github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.4.1 
h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/.clang-format b/src/nvcgo/vendor/github.com/cilium/ebpf/.clang-format index 4eb94b1ba..0ff425760 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/.clang-format +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/.clang-format @@ -4,6 +4,9 @@ BasedOnStyle: LLVM AlignAfterOpenBracket: DontAlign AlignConsecutiveAssignments: true AlignEscapedNewlines: DontAlign +# mkdocs annotations in source code are written as trailing comments +# and alignment pushes these really far away from the content. 
+AlignTrailingComments: false AlwaysBreakBeforeMultilineStrings: true AlwaysBreakTemplateDeclarations: false AllowAllParametersOfDeclarationOnNextLine: false @@ -14,4 +17,9 @@ KeepEmptyLinesAtTheStartOfBlocks: false TabWidth: 4 UseTab: ForContinuationAndIndentation ColumnLimit: 1000 +# Go compiler comments need to stay unindented. +CommentPragmas: '^go:.*' +# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64 +# and sorting makes this impossible. +SortIncludes: false ... diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/.gitattributes b/src/nvcgo/vendor/github.com/cilium/ebpf/.gitattributes new file mode 100644 index 000000000..ea7c9a89c --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/.gitattributes @@ -0,0 +1,4 @@ +# Force line ending normalisation +* text=auto +# Show types.go in the PR diff view by default +internal/sys/types.go linguist-generated=false diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/.golangci.yaml b/src/nvcgo/vendor/github.com/cilium/ebpf/.golangci.yaml index dc62dd6d0..8f88708b2 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/.golangci.yaml +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -1,28 +1,29 @@ ---- -issues: - exclude-rules: - # syscall param structs will have unused fields in Go code. - - path: syscall.*.go - linters: - - structcheck - +version: "2" linters: - disable-all: true + default: none enable: - - deadcode - - errcheck - - goimports - - gosimple + - depguard - govet - ineffassign - misspell - - staticcheck - - structcheck - - typecheck - unused - - varcheck + settings: + depguard: + rules: + no-x-sys-unix: + files: + - '!**/internal/unix/*.go' + - '!**/examples/**/*.go' + - '!**/docs/**/*.go' + deny: + - pkg: golang.org/x/sys/unix + desc: use internal/unix instead - # Could be enabled later: - # - gocyclo - # - maligned - # - gosec +formatters: + enable: + - gofmt + - goimports + settings: + goimports: + local-prefixes: + - github.com/cilium/ebpf diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/.vimto.toml b/src/nvcgo/vendor/github.com/cilium/ebpf/.vimto.toml new file mode 100644 index 000000000..49a12dbc0 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/.vimto.toml @@ -0,0 +1,12 @@ +kernel="ghcr.io/cilium/ci-kernels:stable" +smp="cpus=2" +memory="1G" +user="root" +setup=[ + "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup", + "/bin/sh -c 'modprobe bpf_testmod || true'", + "dmesg --clear", +] +teardown=[ + "dmesg --read-clear", +] diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/src/nvcgo/vendor/github.com/cilium/ebpf/ARCHITECTURE.md deleted file mode 100644 index 8cd7e2486..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/ARCHITECTURE.md +++ /dev/null @@ -1,86 +0,0 @@ -Architecture of the library -=== - - ELF -> Specifications -> Objects -> Links - -ELF ---- - -BPF is usually produced by using Clang to compile a subset of C. Clang outputs -an ELF file which contains program byte code (aka BPF), but also metadata for -maps used by the program. The metadata follows the conventions set by libbpf -shipped with the kernel. Certain ELF sections have special meaning -and contain structures defined by libbpf. Newer versions of clang emit -additional metadata in BPF Type Format (aka BTF). - -The library aims to be compatible with libbpf so that moving from a C toolchain -to a Go one creates little friction. 
To that end, the [ELF reader](elf_reader.go) -is tested against the Linux selftests and avoids introducing custom behaviour -if possible. - -The output of the ELF reader is a `CollectionSpec` which encodes -all of the information contained in the ELF in a form that is easy to work with -in Go. - -### BTF - -The BPF Type Format describes more than just the types used by a BPF program. It -includes debug aids like which source line corresponds to which instructions and -what global variables are used. - -[BTF parsing](internal/btf/) lives in a separate internal package since exposing -it would mean an additional maintenance burden, and because the API still -has sharp corners. The most important concept is the `btf.Type` interface, which -also describes things that aren't really types like `.rodata` or `.bss` sections. -`btf.Type`s can form cyclical graphs, which can easily lead to infinite loops if -one is not careful. Hopefully a safe pattern to work with `btf.Type` emerges as -we write more code that deals with it. - -Specifications ---- - -`CollectionSpec`, `ProgramSpec` and `MapSpec` are blueprints for in-kernel -objects and contain everything necessary to execute the relevant `bpf(2)` -syscalls. Since the ELF reader outputs a `CollectionSpec` it's possible to -modify clang-compiled BPF code, for example to rewrite constants. At the same -time the [asm](asm/) package provides an assembler that can be used to generate -`ProgramSpec` on the fly. - -Creating a spec should never require any privileges or be restricted in any way, -for example by only allowing programs in native endianness. This ensures that -the library stays flexible. - -Objects ---- - -`Program` and `Map` are the result of loading specs into the kernel. Sometimes -loading a spec will fail because the kernel is too old, or a feature is not -enabled. There are multiple ways the library deals with that: - -* Fallback: older kernels don't allow naming programs and maps. The library - automatically detects support for names, and omits them during load if - necessary. This works since name is primarily a debug aid. - -* Sentinel error: sometimes it's possible to detect that a feature isn't available. - In that case the library will return an error wrapping `ErrNotSupported`. - This is also useful to skip tests that can't run on the current kernel. - -Once program and map objects are loaded they expose the kernel's low-level API, -e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer -wrappers on top of the low-level API, like `MapIterator`. The low-level API is -useful when our higher-level API doesn't support a particular use case. - -Links ---- - -BPF can be attached to many different points in the kernel and newer BPF hooks -tend to use bpf_link to do so. Older hooks unfortunately use a combination of -syscalls, netlink messages, etc. Adding support for a new link type should not -pull in large dependencies like netlink, so XDP programs or tracepoints are -out of scope. - -Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds -to BPF_LINK_TRACING. In general, these types should be unexported as long as they -don't export methods outside of the Link interface. Each Go type may have multiple -exported constructors. For example `AttachTracing` and `AttachLSM` create a -tracing link, but are distinct functions since they may require different arguments. 
diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/CODEOWNERS b/src/nvcgo/vendor/github.com/cilium/ebpf/CODEOWNERS new file mode 100644 index 000000000..bd0a61158 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -0,0 +1,11 @@ +* @cilium/ebpf-lib-maintainers + +features/ @rgo3 +link/ @mmat11 + +perf/ @florianl +ringbuf/ @florianl + +btf/ @dylandreimerink + +docs/ @ti-mo diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/src/nvcgo/vendor/github.com/cilium/ebpf/CONTRIBUTING.md index 0d29eae81..673a9ac29 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/CONTRIBUTING.md +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -1,40 +1,5 @@ -# How to contribute +# Contributing to ebpf-go -Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in -the form of pull requests and issues reporting bugs or suggesting new features -are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get -a better understanding for the high-level goals. - -New features must be accompanied by tests. Before starting work on any large -feature, please [join](https://ebpf.io/slack) the -[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack to -discuss the design first. - -When submitting pull requests, consider writing details about what problem you -are solving and why the proposed approach solves that problem in commit messages -and/or pull request description to help future library users and maintainers to -reason about the proposed changes. - -## Running the tests - -Many of the tests require privileges to set resource limits and load eBPF code. -The easiest way to obtain these is to run the tests with `sudo`. - -To test the current package with your local kernel you can simply run: -``` -go test -exec sudo ./... -``` - -To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script. -It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed. - -Examples: - -```bash -# Run all tests on a 5.4 kernel -./run-tests.sh 5.4 - -# Run a subset of tests: -./run-tests.sh 5.4 go test ./link -``` +Want to contribute to ebpf-go? There are a few things you need to know. +We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started. diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/src/nvcgo/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 000000000..a56a03e39 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,3 @@ +# Maintainers + +Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/Makefile b/src/nvcgo/vendor/github.com/cilium/ebpf/Makefile index 76a448caa..4f53b37f3 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/Makefile +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/Makefile @@ -1,9 +1,12 @@ # The development version of clang is distributed as the 'clang' binary, # while stable/released versions have a version number attached. # Pin the default clang to a stable version. -CLANG ?= clang-13 -STRIP ?= llvm-strip-13 -CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) +CLANG ?= clang-20 +STRIP ?= llvm-strip-20 +OBJCOPY ?= llvm-objcopy-20 +CFLAGS := -O2 -g -Wall -Werror -mcpu=v2 $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ # Obtain an absolute path to the directory of the Makefile. 
# Assume the Makefile is in the root of the repository. @@ -13,17 +16,19 @@ UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) # Prefer podman if installed, otherwise use docker. # Note: Setting the var at runtime will always override. CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) -CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman),, --user "${UIDGID}") +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), \ + --log-driver=none \ + -v "$(shell go env GOCACHE)":/root/.cache/go-build \ + -v "$(shell go env GOMODCACHE)":/go/pkg/mod, --user "${UIDGID}") IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) -# clang <8 doesn't tag relocs properly (STT_NOTYPE) -# clang 9 is the first version emitting BTF TARGETS := \ - testdata/loader-clang-7 \ - testdata/loader-clang-9 \ + testdata/loader-clang-14 \ + testdata/loader-clang-17 \ testdata/loader-$(CLANG) \ + testdata/manyprogs \ testdata/btf_map_init \ testdata/invalid_map \ testdata/raw_tracepoint \ @@ -31,11 +36,26 @@ TARGETS := \ testdata/invalid_btf_map_init \ testdata/strings \ testdata/freplace \ + testdata/fentry_fexit \ testdata/iproute2_map_compat \ testdata/map_spin_lock \ testdata/subprog_reloc \ testdata/fwd_decl \ - internal/btf/testdata/relocs + testdata/kconfig \ + testdata/ksym \ + testdata/kfunc \ + testdata/invalid-kfunc \ + testdata/kfunc-kmod \ + testdata/constants \ + testdata/errors \ + testdata/variables \ + testdata/arena \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt \ + btf/testdata/relocs_enum \ + btf/testdata/tags \ + cmd/bpf2go/testdata/minimal .PHONY: all clean container-all container-shell generate @@ -43,33 +63,37 @@ TARGETS := \ # Build all ELF binaries using a containerized LLVM toolchain. container-all: - ${CONTAINER_ENGINE} run --rm ${CONTAINER_RUN_ARGS} \ + +${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ - --env CFLAGS="-fdebug-prefix-map=/ebpf=." \ --env HOME="/tmp" \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ "${IMAGE}:${VERSION}" \ - $(MAKE) all + make all # (debug) Drop the user into a shell inside the container as root. +# Set BPF2GO_ envs to make 'make generate' just work. container-shell: - ${CONTAINER_ENGINE} run --rm -ti \ + ${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ -v "${REPODIR}":/ebpf -w /ebpf \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_CFLAGS="$(CFLAGS)" \ "${IMAGE}:${VERSION}" clean: - -$(RM) testdata/*.elf - -$(RM) internal/btf/testdata/*.elf + find "$(CURDIR)" -name "*.elf" -delete + find "$(CURDIR)" -name "*.o" -delete + +format: + find . -type f -name "*.c" | xargs clang-format -i -all: $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf -# $BPF_CLANG is used in go:generate invocations. -generate: export BPF_CLANG := $(CLANG) -generate: export BPF_CFLAGS := $(CFLAGS) generate: - go generate ./cmd/bpf2go/test - cd examples/ && go generate ./... + go generate -run "gentypes" ./... + go generate -skip "gentypes" ./... 
testdata/loader-%-el.elf: testdata/loader.c $* $(CFLAGS) -target bpfel -c $< -o $@ @@ -87,8 +111,8 @@ testdata/loader-%-eb.elf: testdata/loader.c $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ $(STRIP) -g $@ -# Usage: make VMLINUX=/path/to/vmlinux vmlinux-btf -.PHONY: vmlinux-btf -vmlinux-btf: internal/btf/testdata/vmlinux-btf.gz -internal/btf/testdata/vmlinux-btf.gz: $(VMLINUX) - objcopy --dump-section .BTF=/dev/stdout "$<" /dev/null | gzip > "$@" +.PHONY: update-kernel-deps +update-kernel-deps: export KERNEL_VERSION?=6.8 +update-kernel-deps: + ./testdata/sh/update-kernel-deps.sh + $(MAKE) container-all diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/README.md b/src/nvcgo/vendor/github.com/cilium/ebpf/README.md index 69a6bb0e9..01a154c61 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/README.md +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/README.md @@ -2,35 +2,38 @@ [![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) -![HoneyGopher](.github/images/cilium-ebpf.png) +![HoneyGopher](docs/ebpf/ebpf-go.png) -eBPF is a pure Go library that provides utilities for loading, compiling, and +ebpf-go is a pure Go library that provides utilities for loading, compiling, and debugging eBPF programs. It has minimal external dependencies and is intended to be used in long running processes. -The library is maintained by [Cloudflare](https://www.cloudflare.com) and -[Cilium](https://www.cilium.io). - -See [ebpf.io](https://ebpf.io) for other projects from the eBPF ecosystem. +See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF +ecosystem. ## Getting Started -A small collection of Go and eBPF programs that serve as examples for building -your own tools can be found under [examples/](examples/). +Please take a look at our [Getting Started] guide. -Contributions are highly encouraged, as they highlight certain use cases of +[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of eBPF and the library, and help shape the future of the project. ## Getting Help -Please -[join](https://ebpf.io/slack) the +The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page. +Please search for existing threads before starting a new one. Refrain from +opening issues on the bug tracker if you're just starting out or if you're not +sure if something is a bug in the library code. + +Alternatively, [join](https://ebpf.io/slack) the [#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you -have questions regarding the library. +have other questions regarding the project. Note that this channel is ephemeral +and has its history erased past a certain point, which is less helpful for +others running into the same problem later. ## Packages -This library includes the following packages: +This library includes the following packages: * [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic assembler, allowing you to write eBPF assembly instructions directly @@ -38,7 +41,7 @@ This library includes the following packages: * [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows compiling and embedding eBPF programs written in C within Go code. As well as compiling the C code, it auto-generates Go code for loading and manipulating - the eBPF program and map objects. + the eBPF program and map objects. 
* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF to various hooks * [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a @@ -49,23 +52,18 @@ This library includes the following packages: of `bpftool feature probe` for discovering BPF-related kernel features using native Go. * [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. +* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format. +* [pin](https://pkg.go.dev/github.com/cilium/ebpf/pin) provides APIs for working with pinned objects on bpffs. ## Requirements * A version of Go that is [supported by upstream](https://golang.org/doc/devel/release.html#policy) -* Linux >= 4.4. CI is run against LTS releases. - -## Regenerating Testdata - -Run `make` in the root of this repository to rebuild testdata in all -subpackages. This requires Docker, as it relies on a standardized build -environment to keep the build output stable. - -It is possible to regenerate data using Podman by overriding the `CONTAINER_*` -variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`. - -The toolchain image build files are kept in [testdata/docker/](testdata/docker/). +* Linux (amd64, arm64): CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed + versions are not supported. +* Windows (amd64): CI is run against Windows Server 2022. Only the latest eBPF for Windows + release is supported. +* Other architectures are best effort. 32bit arches are not supported. ## License @@ -74,3 +72,5 @@ MIT ### eBPF Gopher The eBPF honeygopher is based on the Go gopher designed by Renee French. + +[Getting Started]: https://ebpf-go.dev/guides/getting-started/ diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu.go index 70ccc4d15..a4ae72212 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu.go @@ -1,26 +1,26 @@ package asm -//go:generate stringer -output alu_string.go -type=Source,Endianness,ALUOp +//go:generate go tool stringer -output alu_string.go -type=Source,Endianness,ALUOp // Source of ALU / ALU64 / Branch operations // -// msb lsb -// +----+-+---+ -// |op |S|cls| -// +----+-+---+ -type Source uint8 +// msb lsb +// +------------+-+---+ +// | op |S|cls| +// +------------+-+---+ +type Source uint16 -const sourceMask OpCode = 0x08 +const sourceMask OpCode = 0x0008 // Source bitmask const ( // InvalidSource is returned by getters when invoked // on non ALU / branch OpCodes. - InvalidSource Source = 0xff + InvalidSource Source = 0xffff // ImmSource src is from constant - ImmSource Source = 0x00 + ImmSource Source = 0x0000 // RegSource src is from register - RegSource Source = 0x08 + RegSource Source = 0x0008 ) // The Endianness of a byte swap instruction. 
@@ -39,46 +39,56 @@ const ( // ALUOp are ALU / ALU64 operations // -// msb lsb -// +----+-+---+ -// |OP |s|cls| -// +----+-+---+ -type ALUOp uint8 +// msb lsb +// +-------+----+-+---+ +// | EXT | OP |s|cls| +// +-------+----+-+---+ +type ALUOp uint16 -const aluMask OpCode = 0xf0 +const aluMask OpCode = 0x3ff0 const ( // InvalidALUOp is returned by getters when invoked // on non ALU OpCodes - InvalidALUOp ALUOp = 0xff + InvalidALUOp ALUOp = 0xffff // Add - addition - Add ALUOp = 0x00 + Add ALUOp = 0x0000 // Sub - subtraction - Sub ALUOp = 0x10 + Sub ALUOp = 0x0010 // Mul - multiplication - Mul ALUOp = 0x20 + Mul ALUOp = 0x0020 // Div - division - Div ALUOp = 0x30 + Div ALUOp = 0x0030 + // SDiv - signed division + SDiv ALUOp = Div + 0x0100 // Or - bitwise or - Or ALUOp = 0x40 + Or ALUOp = 0x0040 // And - bitwise and - And ALUOp = 0x50 + And ALUOp = 0x0050 // LSh - bitwise shift left - LSh ALUOp = 0x60 + LSh ALUOp = 0x0060 // RSh - bitwise shift right - RSh ALUOp = 0x70 + RSh ALUOp = 0x0070 // Neg - sign/unsign signing bit - Neg ALUOp = 0x80 + Neg ALUOp = 0x0080 // Mod - modulo - Mod ALUOp = 0x90 + Mod ALUOp = 0x0090 + // SMod - signed modulo + SMod ALUOp = Mod + 0x0100 // Xor - bitwise xor - Xor ALUOp = 0xa0 + Xor ALUOp = 0x00a0 // Mov - move value from one place to another - Mov ALUOp = 0xb0 - // ArSh - arithmatic shift - ArSh ALUOp = 0xc0 + Mov ALUOp = 0x00b0 + // MovSX8 - move lower 8 bits, sign extended upper bits of target + MovSX8 ALUOp = Mov + 0x0100 + // MovSX16 - move lower 16 bits, sign extended upper bits of target + MovSX16 ALUOp = Mov + 0x0200 + // MovSX32 - move lower 32 bits, sign extended upper bits of target + MovSX32 ALUOp = Mov + 0x0300 + // ArSh - arithmetic shift + ArSh ALUOp = 0x00c0 // Swap - endian conversions - Swap ALUOp = 0xd0 + Swap ALUOp = 0x00d0 ) // HostTo converts from host to another endianness. @@ -102,6 +112,27 @@ func HostTo(endian Endianness, dst Register, size Size) Instruction { } } +// BSwap unconditionally reverses the order of bytes in a register. +func BSwap(dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALU64Class).SetALUOp(Swap), + Dst: dst, + Constant: imm, + } +} + // Op returns the OpCode for an ALU operation with a given source. func (op ALUOp) Op(source Source) OpCode { return OpCode(ALU64Class).SetALUOp(op).SetSource(source) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu_string.go index 72d3fe629..35b406bf3 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu_string.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -8,7 +8,7 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} - _ = x[InvalidSource-255] + _ = x[InvalidSource-65535] _ = x[ImmSource-0] _ = x[RegSource-8] } @@ -25,7 +25,7 @@ func (i Source) String() string { return _Source_name_0 case i == 8: return _Source_name_1 - case i == 255: + case i == 65535: return _Source_name_2 default: return "Source(" + strconv.FormatInt(int64(i), 10) + ")" @@ -62,41 +62,51 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} - _ = x[InvalidALUOp-255] + _ = x[InvalidALUOp-65535] _ = x[Add-0] _ = x[Sub-16] _ = x[Mul-32] _ = x[Div-48] + _ = x[SDiv-304] _ = x[Or-64] _ = x[And-80] _ = x[LSh-96] _ = x[RSh-112] _ = x[Neg-128] _ = x[Mod-144] + _ = x[SMod-400] _ = x[Xor-160] _ = x[Mov-176] + _ = x[MovSX8-432] + _ = x[MovSX16-688] + _ = x[MovSX32-944] _ = x[ArSh-192] _ = x[Swap-208] } -const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp" +const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp" var _ALUOp_map = map[ALUOp]string{ - 0: _ALUOp_name[0:3], - 16: _ALUOp_name[3:6], - 32: _ALUOp_name[6:9], - 48: _ALUOp_name[9:12], - 64: _ALUOp_name[12:14], - 80: _ALUOp_name[14:17], - 96: _ALUOp_name[17:20], - 112: _ALUOp_name[20:23], - 128: _ALUOp_name[23:26], - 144: _ALUOp_name[26:29], - 160: _ALUOp_name[29:32], - 176: _ALUOp_name[32:35], - 192: _ALUOp_name[35:39], - 208: _ALUOp_name[39:43], - 255: _ALUOp_name[43:55], + 0: _ALUOp_name[0:3], + 16: _ALUOp_name[3:6], + 32: _ALUOp_name[6:9], + 48: _ALUOp_name[9:12], + 64: _ALUOp_name[12:14], + 80: _ALUOp_name[14:17], + 96: _ALUOp_name[17:20], + 112: _ALUOp_name[20:23], + 128: _ALUOp_name[23:26], + 144: _ALUOp_name[26:29], + 160: _ALUOp_name[29:32], + 176: _ALUOp_name[32:35], + 192: _ALUOp_name[35:39], + 208: _ALUOp_name[39:43], + 304: _ALUOp_name[43:47], + 400: _ALUOp_name[47:51], + 432: _ALUOp_name[51:57], + 688: _ALUOp_name[57:64], + 944: _ALUOp_name[64:71], + 65535: _ALUOp_name[71:83], } func (i ALUOp) String() string { diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func.go index b75a2934e..fe75c7578 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func.go @@ -1,203 +1,18 @@ package asm -//go:generate stringer -output func_string.go -type=BuiltinFunc +import "github.com/cilium/ebpf/internal/platform" + +//go:generate go tool stringer -output func_string.go -type=BuiltinFunc // BuiltinFunc is a built-in eBPF function. -type BuiltinFunc int32 +type BuiltinFunc uint32 -// eBPF built-in functions -// -// You can regenerate this list using the following gawk script: +// BuiltinFuncForPlatform returns a platform specific function constant. // -// /FN\(.+\),/ { -// match($1, /\((.+)\)/, r) -// split(r[1], p, "_") -// printf "Fn" -// for (i in p) { -// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) -// } -// print "" -// } -// -// The script expects include/uapi/linux/bpf.h as it's input. 
-const ( - FnUnspec BuiltinFunc = iota - FnMapLookupElem - FnMapUpdateElem - FnMapDeleteElem - FnProbeRead - FnKtimeGetNs - FnTracePrintk - FnGetPrandomU32 - FnGetSmpProcessorId - FnSkbStoreBytes - FnL3CsumReplace - FnL4CsumReplace - FnTailCall - FnCloneRedirect - FnGetCurrentPidTgid - FnGetCurrentUidGid - FnGetCurrentComm - FnGetCgroupClassid - FnSkbVlanPush - FnSkbVlanPop - FnSkbGetTunnelKey - FnSkbSetTunnelKey - FnPerfEventRead - FnRedirect - FnGetRouteRealm - FnPerfEventOutput - FnSkbLoadBytes - FnGetStackid - FnCsumDiff - FnSkbGetTunnelOpt - FnSkbSetTunnelOpt - FnSkbChangeProto - FnSkbChangeType - FnSkbUnderCgroup - FnGetHashRecalc - FnGetCurrentTask - FnProbeWriteUser - FnCurrentTaskUnderCgroup - FnSkbChangeTail - FnSkbPullData - FnCsumUpdate - FnSetHashInvalid - FnGetNumaNodeId - FnSkbChangeHead - FnXdpAdjustHead - FnProbeReadStr - FnGetSocketCookie - FnGetSocketUid - FnSetHash - FnSetsockopt - FnSkbAdjustRoom - FnRedirectMap - FnSkRedirectMap - FnSockMapUpdate - FnXdpAdjustMeta - FnPerfEventReadValue - FnPerfProgReadValue - FnGetsockopt - FnOverrideReturn - FnSockOpsCbFlagsSet - FnMsgRedirectMap - FnMsgApplyBytes - FnMsgCorkBytes - FnMsgPullData - FnBind - FnXdpAdjustTail - FnSkbGetXfrmState - FnGetStack - FnSkbLoadBytesRelative - FnFibLookup - FnSockHashUpdate - FnMsgRedirectHash - FnSkRedirectHash - FnLwtPushEncap - FnLwtSeg6StoreBytes - FnLwtSeg6AdjustSrh - FnLwtSeg6Action - FnRcRepeat - FnRcKeydown - FnSkbCgroupId - FnGetCurrentCgroupId - FnGetLocalStorage - FnSkSelectReuseport - FnSkbAncestorCgroupId - FnSkLookupTcp - FnSkLookupUdp - FnSkRelease - FnMapPushElem - FnMapPopElem - FnMapPeekElem - FnMsgPushData - FnMsgPopData - FnRcPointerRel - FnSpinLock - FnSpinUnlock - FnSkFullsock - FnTcpSock - FnSkbEcnSetCe - FnGetListenerSock - FnSkcLookupTcp - FnTcpCheckSyncookie - FnSysctlGetName - FnSysctlGetCurrentValue - FnSysctlGetNewValue - FnSysctlSetNewValue - FnStrtol - FnStrtoul - FnSkStorageGet - FnSkStorageDelete - FnSendSignal - FnTcpGenSyncookie - FnSkbOutput - FnProbeReadUser - FnProbeReadKernel - FnProbeReadUserStr - FnProbeReadKernelStr - FnTcpSendAck - FnSendSignalThread - FnJiffies64 - FnReadBranchRecords - FnGetNsCurrentPidTgid - FnXdpOutput - FnGetNetnsCookie - FnGetCurrentAncestorCgroupId - FnSkAssign - FnKtimeGetBootNs - FnSeqPrintf - FnSeqWrite - FnSkCgroupId - FnSkAncestorCgroupId - FnRingbufOutput - FnRingbufReserve - FnRingbufSubmit - FnRingbufDiscard - FnRingbufQuery - FnCsumLevel - FnSkcToTcp6Sock - FnSkcToTcpSock - FnSkcToTcpTimewaitSock - FnSkcToTcpRequestSock - FnSkcToUdp6Sock - FnGetTaskStack - FnLoadHdrOpt - FnStoreHdrOpt - FnReserveHdrOpt - FnInodeStorageGet - FnInodeStorageDelete - FnDPath - FnCopyFromUser - FnSnprintfBtf - FnSeqPrintfBtf - FnSkbCgroupClassid - FnRedirectNeigh - FnPerCpuPtr - FnThisCpuPtr - FnRedirectPeer - FnTaskStorageGet - FnTaskStorageDelete - FnGetCurrentTaskBtf - FnBprmOptsSet - FnKtimeGetCoarseNs - FnImaInodeHash - FnSockFromFile - FnCheckMtu - FnForEachMapElem - FnSnprintf - FnSysBpf - FnBtfFindByNameKind - FnSysClose - FnTimerInit - FnTimerSetCallback - FnTimerStart - FnTimerCancel - FnGetFuncIp - FnGetAttachCookie - FnTaskPtRegs -) +// Use this if the library doesn't provide a constant yet. +func BuiltinFuncForPlatform(plat string, value uint32) (BuiltinFunc, error) { + return platform.EncodeConstant[BuiltinFunc](plat, value) +} // Call emits a function call. 
func (fn BuiltinFunc) Call() Instruction { diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_lin.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_lin.go new file mode 100644 index 000000000..1dd026d62 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_lin.go @@ -0,0 +1,223 @@ +// Code generated by internal/cmd/genfunctions.awk; DO NOT EDIT. + +package asm + +// Code in this file is derived from Linux, available under the GPL-2.0 WITH Linux-syscall-note. + +import "github.com/cilium/ebpf/internal/platform" + +// Built-in functions (Linux). +const ( + FnUnspec = BuiltinFunc(platform.LinuxTag | 0) //lint:ignore SA4016 consistency + FnMapLookupElem = BuiltinFunc(platform.LinuxTag | 1) + FnMapUpdateElem = BuiltinFunc(platform.LinuxTag | 2) + FnMapDeleteElem = BuiltinFunc(platform.LinuxTag | 3) + FnProbeRead = BuiltinFunc(platform.LinuxTag | 4) + FnKtimeGetNs = BuiltinFunc(platform.LinuxTag | 5) + FnTracePrintk = BuiltinFunc(platform.LinuxTag | 6) + FnGetPrandomU32 = BuiltinFunc(platform.LinuxTag | 7) + FnGetSmpProcessorId = BuiltinFunc(platform.LinuxTag | 8) + FnSkbStoreBytes = BuiltinFunc(platform.LinuxTag | 9) + FnL3CsumReplace = BuiltinFunc(platform.LinuxTag | 10) + FnL4CsumReplace = BuiltinFunc(platform.LinuxTag | 11) + FnTailCall = BuiltinFunc(platform.LinuxTag | 12) + FnCloneRedirect = BuiltinFunc(platform.LinuxTag | 13) + FnGetCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 14) + FnGetCurrentUidGid = BuiltinFunc(platform.LinuxTag | 15) + FnGetCurrentComm = BuiltinFunc(platform.LinuxTag | 16) + FnGetCgroupClassid = BuiltinFunc(platform.LinuxTag | 17) + FnSkbVlanPush = BuiltinFunc(platform.LinuxTag | 18) + FnSkbVlanPop = BuiltinFunc(platform.LinuxTag | 19) + FnSkbGetTunnelKey = BuiltinFunc(platform.LinuxTag | 20) + FnSkbSetTunnelKey = BuiltinFunc(platform.LinuxTag | 21) + FnPerfEventRead = BuiltinFunc(platform.LinuxTag | 22) + FnRedirect = BuiltinFunc(platform.LinuxTag | 23) + FnGetRouteRealm = BuiltinFunc(platform.LinuxTag | 24) + FnPerfEventOutput = BuiltinFunc(platform.LinuxTag | 25) + FnSkbLoadBytes = BuiltinFunc(platform.LinuxTag | 26) + FnGetStackid = BuiltinFunc(platform.LinuxTag | 27) + FnCsumDiff = BuiltinFunc(platform.LinuxTag | 28) + FnSkbGetTunnelOpt = BuiltinFunc(platform.LinuxTag | 29) + FnSkbSetTunnelOpt = BuiltinFunc(platform.LinuxTag | 30) + FnSkbChangeProto = BuiltinFunc(platform.LinuxTag | 31) + FnSkbChangeType = BuiltinFunc(platform.LinuxTag | 32) + FnSkbUnderCgroup = BuiltinFunc(platform.LinuxTag | 33) + FnGetHashRecalc = BuiltinFunc(platform.LinuxTag | 34) + FnGetCurrentTask = BuiltinFunc(platform.LinuxTag | 35) + FnProbeWriteUser = BuiltinFunc(platform.LinuxTag | 36) + FnCurrentTaskUnderCgroup = BuiltinFunc(platform.LinuxTag | 37) + FnSkbChangeTail = BuiltinFunc(platform.LinuxTag | 38) + FnSkbPullData = BuiltinFunc(platform.LinuxTag | 39) + FnCsumUpdate = BuiltinFunc(platform.LinuxTag | 40) + FnSetHashInvalid = BuiltinFunc(platform.LinuxTag | 41) + FnGetNumaNodeId = BuiltinFunc(platform.LinuxTag | 42) + FnSkbChangeHead = BuiltinFunc(platform.LinuxTag | 43) + FnXdpAdjustHead = BuiltinFunc(platform.LinuxTag | 44) + FnProbeReadStr = BuiltinFunc(platform.LinuxTag | 45) + FnGetSocketCookie = BuiltinFunc(platform.LinuxTag | 46) + FnGetSocketUid = BuiltinFunc(platform.LinuxTag | 47) + FnSetHash = BuiltinFunc(platform.LinuxTag | 48) + FnSetsockopt = BuiltinFunc(platform.LinuxTag | 49) + FnSkbAdjustRoom = BuiltinFunc(platform.LinuxTag | 50) + FnRedirectMap = BuiltinFunc(platform.LinuxTag | 51) + FnSkRedirectMap = 
BuiltinFunc(platform.LinuxTag | 52) + FnSockMapUpdate = BuiltinFunc(platform.LinuxTag | 53) + FnXdpAdjustMeta = BuiltinFunc(platform.LinuxTag | 54) + FnPerfEventReadValue = BuiltinFunc(platform.LinuxTag | 55) + FnPerfProgReadValue = BuiltinFunc(platform.LinuxTag | 56) + FnGetsockopt = BuiltinFunc(platform.LinuxTag | 57) + FnOverrideReturn = BuiltinFunc(platform.LinuxTag | 58) + FnSockOpsCbFlagsSet = BuiltinFunc(platform.LinuxTag | 59) + FnMsgRedirectMap = BuiltinFunc(platform.LinuxTag | 60) + FnMsgApplyBytes = BuiltinFunc(platform.LinuxTag | 61) + FnMsgCorkBytes = BuiltinFunc(platform.LinuxTag | 62) + FnMsgPullData = BuiltinFunc(platform.LinuxTag | 63) + FnBind = BuiltinFunc(platform.LinuxTag | 64) + FnXdpAdjustTail = BuiltinFunc(platform.LinuxTag | 65) + FnSkbGetXfrmState = BuiltinFunc(platform.LinuxTag | 66) + FnGetStack = BuiltinFunc(platform.LinuxTag | 67) + FnSkbLoadBytesRelative = BuiltinFunc(platform.LinuxTag | 68) + FnFibLookup = BuiltinFunc(platform.LinuxTag | 69) + FnSockHashUpdate = BuiltinFunc(platform.LinuxTag | 70) + FnMsgRedirectHash = BuiltinFunc(platform.LinuxTag | 71) + FnSkRedirectHash = BuiltinFunc(platform.LinuxTag | 72) + FnLwtPushEncap = BuiltinFunc(platform.LinuxTag | 73) + FnLwtSeg6StoreBytes = BuiltinFunc(platform.LinuxTag | 74) + FnLwtSeg6AdjustSrh = BuiltinFunc(platform.LinuxTag | 75) + FnLwtSeg6Action = BuiltinFunc(platform.LinuxTag | 76) + FnRcRepeat = BuiltinFunc(platform.LinuxTag | 77) + FnRcKeydown = BuiltinFunc(platform.LinuxTag | 78) + FnSkbCgroupId = BuiltinFunc(platform.LinuxTag | 79) + FnGetCurrentCgroupId = BuiltinFunc(platform.LinuxTag | 80) + FnGetLocalStorage = BuiltinFunc(platform.LinuxTag | 81) + FnSkSelectReuseport = BuiltinFunc(platform.LinuxTag | 82) + FnSkbAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 83) + FnSkLookupTcp = BuiltinFunc(platform.LinuxTag | 84) + FnSkLookupUdp = BuiltinFunc(platform.LinuxTag | 85) + FnSkRelease = BuiltinFunc(platform.LinuxTag | 86) + FnMapPushElem = BuiltinFunc(platform.LinuxTag | 87) + FnMapPopElem = BuiltinFunc(platform.LinuxTag | 88) + FnMapPeekElem = BuiltinFunc(platform.LinuxTag | 89) + FnMsgPushData = BuiltinFunc(platform.LinuxTag | 90) + FnMsgPopData = BuiltinFunc(platform.LinuxTag | 91) + FnRcPointerRel = BuiltinFunc(platform.LinuxTag | 92) + FnSpinLock = BuiltinFunc(platform.LinuxTag | 93) + FnSpinUnlock = BuiltinFunc(platform.LinuxTag | 94) + FnSkFullsock = BuiltinFunc(platform.LinuxTag | 95) + FnTcpSock = BuiltinFunc(platform.LinuxTag | 96) + FnSkbEcnSetCe = BuiltinFunc(platform.LinuxTag | 97) + FnGetListenerSock = BuiltinFunc(platform.LinuxTag | 98) + FnSkcLookupTcp = BuiltinFunc(platform.LinuxTag | 99) + FnTcpCheckSyncookie = BuiltinFunc(platform.LinuxTag | 100) + FnSysctlGetName = BuiltinFunc(platform.LinuxTag | 101) + FnSysctlGetCurrentValue = BuiltinFunc(platform.LinuxTag | 102) + FnSysctlGetNewValue = BuiltinFunc(platform.LinuxTag | 103) + FnSysctlSetNewValue = BuiltinFunc(platform.LinuxTag | 104) + FnStrtol = BuiltinFunc(platform.LinuxTag | 105) + FnStrtoul = BuiltinFunc(platform.LinuxTag | 106) + FnSkStorageGet = BuiltinFunc(platform.LinuxTag | 107) + FnSkStorageDelete = BuiltinFunc(platform.LinuxTag | 108) + FnSendSignal = BuiltinFunc(platform.LinuxTag | 109) + FnTcpGenSyncookie = BuiltinFunc(platform.LinuxTag | 110) + FnSkbOutput = BuiltinFunc(platform.LinuxTag | 111) + FnProbeReadUser = BuiltinFunc(platform.LinuxTag | 112) + FnProbeReadKernel = BuiltinFunc(platform.LinuxTag | 113) + FnProbeReadUserStr = BuiltinFunc(platform.LinuxTag | 114) + FnProbeReadKernelStr = 
BuiltinFunc(platform.LinuxTag | 115) + FnTcpSendAck = BuiltinFunc(platform.LinuxTag | 116) + FnSendSignalThread = BuiltinFunc(platform.LinuxTag | 117) + FnJiffies64 = BuiltinFunc(platform.LinuxTag | 118) + FnReadBranchRecords = BuiltinFunc(platform.LinuxTag | 119) + FnGetNsCurrentPidTgid = BuiltinFunc(platform.LinuxTag | 120) + FnXdpOutput = BuiltinFunc(platform.LinuxTag | 121) + FnGetNetnsCookie = BuiltinFunc(platform.LinuxTag | 122) + FnGetCurrentAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 123) + FnSkAssign = BuiltinFunc(platform.LinuxTag | 124) + FnKtimeGetBootNs = BuiltinFunc(platform.LinuxTag | 125) + FnSeqPrintf = BuiltinFunc(platform.LinuxTag | 126) + FnSeqWrite = BuiltinFunc(platform.LinuxTag | 127) + FnSkCgroupId = BuiltinFunc(platform.LinuxTag | 128) + FnSkAncestorCgroupId = BuiltinFunc(platform.LinuxTag | 129) + FnRingbufOutput = BuiltinFunc(platform.LinuxTag | 130) + FnRingbufReserve = BuiltinFunc(platform.LinuxTag | 131) + FnRingbufSubmit = BuiltinFunc(platform.LinuxTag | 132) + FnRingbufDiscard = BuiltinFunc(platform.LinuxTag | 133) + FnRingbufQuery = BuiltinFunc(platform.LinuxTag | 134) + FnCsumLevel = BuiltinFunc(platform.LinuxTag | 135) + FnSkcToTcp6Sock = BuiltinFunc(platform.LinuxTag | 136) + FnSkcToTcpSock = BuiltinFunc(platform.LinuxTag | 137) + FnSkcToTcpTimewaitSock = BuiltinFunc(platform.LinuxTag | 138) + FnSkcToTcpRequestSock = BuiltinFunc(platform.LinuxTag | 139) + FnSkcToUdp6Sock = BuiltinFunc(platform.LinuxTag | 140) + FnGetTaskStack = BuiltinFunc(platform.LinuxTag | 141) + FnLoadHdrOpt = BuiltinFunc(platform.LinuxTag | 142) + FnStoreHdrOpt = BuiltinFunc(platform.LinuxTag | 143) + FnReserveHdrOpt = BuiltinFunc(platform.LinuxTag | 144) + FnInodeStorageGet = BuiltinFunc(platform.LinuxTag | 145) + FnInodeStorageDelete = BuiltinFunc(platform.LinuxTag | 146) + FnDPath = BuiltinFunc(platform.LinuxTag | 147) + FnCopyFromUser = BuiltinFunc(platform.LinuxTag | 148) + FnSnprintfBtf = BuiltinFunc(platform.LinuxTag | 149) + FnSeqPrintfBtf = BuiltinFunc(platform.LinuxTag | 150) + FnSkbCgroupClassid = BuiltinFunc(platform.LinuxTag | 151) + FnRedirectNeigh = BuiltinFunc(platform.LinuxTag | 152) + FnPerCpuPtr = BuiltinFunc(platform.LinuxTag | 153) + FnThisCpuPtr = BuiltinFunc(platform.LinuxTag | 154) + FnRedirectPeer = BuiltinFunc(platform.LinuxTag | 155) + FnTaskStorageGet = BuiltinFunc(platform.LinuxTag | 156) + FnTaskStorageDelete = BuiltinFunc(platform.LinuxTag | 157) + FnGetCurrentTaskBtf = BuiltinFunc(platform.LinuxTag | 158) + FnBprmOptsSet = BuiltinFunc(platform.LinuxTag | 159) + FnKtimeGetCoarseNs = BuiltinFunc(platform.LinuxTag | 160) + FnImaInodeHash = BuiltinFunc(platform.LinuxTag | 161) + FnSockFromFile = BuiltinFunc(platform.LinuxTag | 162) + FnCheckMtu = BuiltinFunc(platform.LinuxTag | 163) + FnForEachMapElem = BuiltinFunc(platform.LinuxTag | 164) + FnSnprintf = BuiltinFunc(platform.LinuxTag | 165) + FnSysBpf = BuiltinFunc(platform.LinuxTag | 166) + FnBtfFindByNameKind = BuiltinFunc(platform.LinuxTag | 167) + FnSysClose = BuiltinFunc(platform.LinuxTag | 168) + FnTimerInit = BuiltinFunc(platform.LinuxTag | 169) + FnTimerSetCallback = BuiltinFunc(platform.LinuxTag | 170) + FnTimerStart = BuiltinFunc(platform.LinuxTag | 171) + FnTimerCancel = BuiltinFunc(platform.LinuxTag | 172) + FnGetFuncIp = BuiltinFunc(platform.LinuxTag | 173) + FnGetAttachCookie = BuiltinFunc(platform.LinuxTag | 174) + FnTaskPtRegs = BuiltinFunc(platform.LinuxTag | 175) + FnGetBranchSnapshot = BuiltinFunc(platform.LinuxTag | 176) + FnTraceVprintk = BuiltinFunc(platform.LinuxTag | 177) + 
FnSkcToUnixSock = BuiltinFunc(platform.LinuxTag | 178) + FnKallsymsLookupName = BuiltinFunc(platform.LinuxTag | 179) + FnFindVma = BuiltinFunc(platform.LinuxTag | 180) + FnLoop = BuiltinFunc(platform.LinuxTag | 181) + FnStrncmp = BuiltinFunc(platform.LinuxTag | 182) + FnGetFuncArg = BuiltinFunc(platform.LinuxTag | 183) + FnGetFuncRet = BuiltinFunc(platform.LinuxTag | 184) + FnGetFuncArgCnt = BuiltinFunc(platform.LinuxTag | 185) + FnGetRetval = BuiltinFunc(platform.LinuxTag | 186) + FnSetRetval = BuiltinFunc(platform.LinuxTag | 187) + FnXdpGetBuffLen = BuiltinFunc(platform.LinuxTag | 188) + FnXdpLoadBytes = BuiltinFunc(platform.LinuxTag | 189) + FnXdpStoreBytes = BuiltinFunc(platform.LinuxTag | 190) + FnCopyFromUserTask = BuiltinFunc(platform.LinuxTag | 191) + FnSkbSetTstamp = BuiltinFunc(platform.LinuxTag | 192) + FnImaFileHash = BuiltinFunc(platform.LinuxTag | 193) + FnKptrXchg = BuiltinFunc(platform.LinuxTag | 194) + FnMapLookupPercpuElem = BuiltinFunc(platform.LinuxTag | 195) + FnSkcToMptcpSock = BuiltinFunc(platform.LinuxTag | 196) + FnDynptrFromMem = BuiltinFunc(platform.LinuxTag | 197) + FnRingbufReserveDynptr = BuiltinFunc(platform.LinuxTag | 198) + FnRingbufSubmitDynptr = BuiltinFunc(platform.LinuxTag | 199) + FnRingbufDiscardDynptr = BuiltinFunc(platform.LinuxTag | 200) + FnDynptrRead = BuiltinFunc(platform.LinuxTag | 201) + FnDynptrWrite = BuiltinFunc(platform.LinuxTag | 202) + FnDynptrData = BuiltinFunc(platform.LinuxTag | 203) + FnTcpRawGenSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 204) + FnTcpRawGenSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 205) + FnTcpRawCheckSyncookieIpv4 = BuiltinFunc(platform.LinuxTag | 206) + FnTcpRawCheckSyncookieIpv6 = BuiltinFunc(platform.LinuxTag | 207) + FnKtimeGetTaiNs = BuiltinFunc(platform.LinuxTag | 208) + FnUserRingbufDrain = BuiltinFunc(platform.LinuxTag | 209) + FnCgrpStorageGet = BuiltinFunc(platform.LinuxTag | 210) + FnCgrpStorageDelete = BuiltinFunc(platform.LinuxTag | 211) +) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_string.go index 179bc24f1..d5d624f09 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_string.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -184,15 +184,93 @@ func _() { _ = x[FnGetFuncIp-173] _ = x[FnGetAttachCookie-174] _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[FnTcpRawGenSyncookieIpv4-204] + _ = x[FnTcpRawGenSyncookieIpv6-205] + _ = x[FnTcpRawCheckSyncookieIpv4-206] + _ = x[FnTcpRawCheckSyncookieIpv6-207] + _ = x[FnKtimeGetTaiNs-208] + _ = x[FnUserRingbufDrain-209] + _ = x[FnCgrpStorageGet-210] + _ = x[FnCgrpStorageDelete-211] + _ = x[WindowsFnMapLookupElem-268435457] + _ = x[WindowsFnMapUpdateElem-268435458] + _ = 
x[WindowsFnMapDeleteElem-268435459] + _ = x[WindowsFnMapLookupAndDeleteElem-268435460] + _ = x[WindowsFnTailCall-268435461] + _ = x[WindowsFnGetPrandomU32-268435462] + _ = x[WindowsFnKtimeGetBootNs-268435463] + _ = x[WindowsFnGetSmpProcessorId-268435464] + _ = x[WindowsFnKtimeGetNs-268435465] + _ = x[WindowsFnCsumDiff-268435466] + _ = x[WindowsFnRingbufOutput-268435467] + _ = x[WindowsFnTracePrintk2-268435468] + _ = x[WindowsFnTracePrintk3-268435469] + _ = x[WindowsFnTracePrintk4-268435470] + _ = x[WindowsFnTracePrintk5-268435471] + _ = x[WindowsFnMapPushElem-268435472] + _ = x[WindowsFnMapPopElem-268435473] + _ = x[WindowsFnMapPeekElem-268435474] + _ = x[WindowsFnGetCurrentPidTgid-268435475] + _ = x[WindowsFnGetCurrentLogonId-268435476] + _ = x[WindowsFnIsCurrentAdmin-268435477] + _ = x[WindowsFnMemcpy-268435478] + _ = x[WindowsFnMemcmp-268435479] + _ = x[WindowsFnMemset-268435480] + _ = x[WindowsFnMemmove-268435481] + _ = x[WindowsFnGetSocketCookie-268435482] + _ = x[WindowsFnStrncpyS-268435483] + _ = x[WindowsFnStrncatS-268435484] + _ = x[WindowsFnStrnlenS-268435485] + _ = x[WindowsFnKtimeGetBootMs-268435486] + _ = x[WindowsFnKtimeGetMs-268435487] } -const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurre
ntTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegs" +const ( + _BuiltinFunc_name_0 = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDelete" + _BuiltinFunc_name_1 = 
"WindowsFnMapLookupElemWindowsFnMapUpdateElemWindowsFnMapDeleteElemWindowsFnMapLookupAndDeleteElemWindowsFnTailCallWindowsFnGetPrandomU32WindowsFnKtimeGetBootNsWindowsFnGetSmpProcessorIdWindowsFnKtimeGetNsWindowsFnCsumDiffWindowsFnRingbufOutputWindowsFnTracePrintk2WindowsFnTracePrintk3WindowsFnTracePrintk4WindowsFnTracePrintk5WindowsFnMapPushElemWindowsFnMapPopElemWindowsFnMapPeekElemWindowsFnGetCurrentPidTgidWindowsFnGetCurrentLogonIdWindowsFnIsCurrentAdminWindowsFnMemcpyWindowsFnMemcmpWindowsFnMemsetWindowsFnMemmoveWindowsFnGetSocketCookieWindowsFnStrncpySWindowsFnStrncatSWindowsFnStrnlenSWindowsFnKtimeGetBootMsWindowsFnKtimeGetMs" +) -var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591} +var ( + _BuiltinFunc_index_0 = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165} + _BuiltinFunc_index_1 = [...]uint16{0, 22, 44, 66, 97, 114, 136, 159, 185, 204, 221, 243, 264, 285, 306, 327, 347, 366, 386, 412, 438, 461, 476, 491, 506, 522, 546, 563, 580, 597, 620, 639} +) func (i BuiltinFunc) String() string { - if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { + switch { + case i <= 211: + return _BuiltinFunc_name_0[_BuiltinFunc_index_0[i]:_BuiltinFunc_index_0[i+1]] + case 268435457 <= i && i <= 268435487: + i -= 268435457 + return _BuiltinFunc_name_1[_BuiltinFunc_index_1[i]:_BuiltinFunc_index_1[i+1]] + default: return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" } - return 
_BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_win.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_win.go new file mode 100644 index 000000000..b016f0086 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/func_win.go @@ -0,0 +1,44 @@ +// Code generated by internal/cmd/genwinfunctions.awk; DO NOT EDIT. + +package asm + +// Code in this file is derived from eBPF for Windows, available under the MIT License. + +import ( + "github.com/cilium/ebpf/internal/platform" +) + +// Built-in functions (Windows). +const ( + WindowsFnMapLookupElem = BuiltinFunc(platform.WindowsTag | 1) + WindowsFnMapUpdateElem = BuiltinFunc(platform.WindowsTag | 2) + WindowsFnMapDeleteElem = BuiltinFunc(platform.WindowsTag | 3) + WindowsFnMapLookupAndDeleteElem = BuiltinFunc(platform.WindowsTag | 4) + WindowsFnTailCall = BuiltinFunc(platform.WindowsTag | 5) + WindowsFnGetPrandomU32 = BuiltinFunc(platform.WindowsTag | 6) + WindowsFnKtimeGetBootNs = BuiltinFunc(platform.WindowsTag | 7) + WindowsFnGetSmpProcessorId = BuiltinFunc(platform.WindowsTag | 8) + WindowsFnKtimeGetNs = BuiltinFunc(platform.WindowsTag | 9) + WindowsFnCsumDiff = BuiltinFunc(platform.WindowsTag | 10) + WindowsFnRingbufOutput = BuiltinFunc(platform.WindowsTag | 11) + WindowsFnTracePrintk2 = BuiltinFunc(platform.WindowsTag | 12) + WindowsFnTracePrintk3 = BuiltinFunc(platform.WindowsTag | 13) + WindowsFnTracePrintk4 = BuiltinFunc(platform.WindowsTag | 14) + WindowsFnTracePrintk5 = BuiltinFunc(platform.WindowsTag | 15) + WindowsFnMapPushElem = BuiltinFunc(platform.WindowsTag | 16) + WindowsFnMapPopElem = BuiltinFunc(platform.WindowsTag | 17) + WindowsFnMapPeekElem = BuiltinFunc(platform.WindowsTag | 18) + WindowsFnGetCurrentPidTgid = BuiltinFunc(platform.WindowsTag | 19) + WindowsFnGetCurrentLogonId = BuiltinFunc(platform.WindowsTag | 20) + WindowsFnIsCurrentAdmin = BuiltinFunc(platform.WindowsTag | 21) + WindowsFnMemcpy = BuiltinFunc(platform.WindowsTag | 22) + WindowsFnMemcmp = BuiltinFunc(platform.WindowsTag | 23) + WindowsFnMemset = BuiltinFunc(platform.WindowsTag | 24) + WindowsFnMemmove = BuiltinFunc(platform.WindowsTag | 25) + WindowsFnGetSocketCookie = BuiltinFunc(platform.WindowsTag | 26) + WindowsFnStrncpyS = BuiltinFunc(platform.WindowsTag | 27) + WindowsFnStrncatS = BuiltinFunc(platform.WindowsTag | 28) + WindowsFnStrnlenS = BuiltinFunc(platform.WindowsTag | 29) + WindowsFnKtimeGetBootMs = BuiltinFunc(platform.WindowsTag | 30) + WindowsFnKtimeGetMs = BuiltinFunc(platform.WindowsTag | 31) +) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/instruction.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/instruction.go index 22975e8f7..b2ce72ca8 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/instruction.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -8,9 +8,12 @@ import ( "fmt" "io" "math" + "sort" "strings" - "github.com/cilium/ebpf/internal/unix" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" ) // InstructionSize is the size of a BPF instruction in bytes @@ -19,6 +22,10 @@ const InstructionSize = 8 // RawInstructionOffset is an offset in units of raw BPF instructions. 
type RawInstructionOffset uint64 +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + // Bytes returns the offset of an instruction in bytes. func (rio RawInstructionOffset) Bytes() uint64 { return uint64(rio) * InstructionSize @@ -32,24 +39,15 @@ type Instruction struct { Offset int16 Constant int64 - // Reference denotes a reference (e.g. a jump) to another symbol. - Reference string - - // Symbol denotes an instruction at the start of a function body. - Symbol string -} - -// Sym creates a symbol. -func (ins Instruction) Sym(name string) Instruction { - ins.Symbol = name - return ins + // Metadata contains optional metadata about this instruction. + Metadata Metadata } // Unmarshal decodes a BPF instruction. -func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { +func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder, platform string) error { data := make([]byte, InstructionSize) if _, err := io.ReadFull(r, data); err != nil { - return 0, err + return err } ins.OpCode = OpCode(data[0]) @@ -63,31 +61,77 @@ func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, err } ins.Offset = int16(bo.Uint16(data[2:4])) + // Convert to int32 before widening to int64 // to ensure the signed bit is carried over. ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + if ins.IsBuiltinCall() { + if ins.Constant >= 0 { + // Leave negative constants from the instruction stream + // unchanged. These are sometimes used as placeholders for later + // patching. + // This relies on not having a valid platform tag with a high bit set. + fn, err := BuiltinFuncForPlatform(platform, uint32(ins.Constant)) + if err != nil { + return err + } + ins.Constant = int64(fn) + } + } else if ins.OpCode.Class().IsALU() { + switch ins.OpCode.ALUOp() { + case Div: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SDiv) + ins.Offset = 0 + } + case Mod: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SMod) + ins.Offset = 0 + } + case Mov: + switch ins.Offset { + case 8: + ins.OpCode = ins.OpCode.SetALUOp(MovSX8) + ins.Offset = 0 + case 16: + ins.OpCode = ins.OpCode.SetALUOp(MovSX16) + ins.Offset = 0 + case 32: + ins.OpCode = ins.OpCode.SetALUOp(MovSX32) + ins.Offset = 0 + } + } + } else if ins.OpCode.Class() == StXClass && + ins.OpCode.Mode() == AtomicMode { + // For atomic ops, part of the opcode is stored in the + // constant field. Shift over 8 bytes so we can OR with the actual opcode and + // apply `atomicMask` to avoid merging unknown bits that may be added in the future. + ins.OpCode |= (OpCode((ins.Constant << 8)) & atomicMask) + } + if !ins.OpCode.IsDWordLoad() { - return InstructionSize, nil + return nil } // Pull another instruction from the stream to retrieve the second // half of the 64-bit immediate value. if _, err := io.ReadFull(r, data); err != nil { // No Wrap, to avoid io.EOF clash - return 0, errors.New("64bit immediate is missing second half") + return errors.New("64bit immediate is missing second half") } // Require that all fields other than the value are zero. 
if bo.Uint32(data[0:4]) != 0 { - return 0, errors.New("64bit immediate has non-zero fields") + return errors.New("64bit immediate has non-zero fields") } cons1 := uint32(ins.Constant) cons2 := int32(bo.Uint32(data[4:8])) ins.Constant = int64(cons2)<<32 | int64(cons1) - return 2 * InstructionSize, nil + return nil } // Marshal encodes a BPF instruction. @@ -109,8 +153,48 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 0, fmt.Errorf("can't marshal registers: %s", err) } + if ins.IsBuiltinCall() { + fn := BuiltinFunc(ins.Constant) + plat, value := platform.DecodeConstant(fn) + if plat != platform.Native { + return 0, fmt.Errorf("function %s (%s): %w", fn, plat, internal.ErrNotSupportedOnOS) + } + cons = int32(value) + } else if ins.OpCode.Class().IsALU() { + newOffset := int16(0) + switch ins.OpCode.ALUOp() { + case SDiv: + ins.OpCode = ins.OpCode.SetALUOp(Div) + newOffset = 1 + case SMod: + ins.OpCode = ins.OpCode.SetALUOp(Mod) + newOffset = 1 + case MovSX8: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 8 + case MovSX16: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 16 + case MovSX32: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 32 + } + if newOffset != 0 && ins.Offset != 0 { + return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) + } + ins.Offset = newOffset + } else if atomic := ins.OpCode.AtomicOp(); atomic != InvalidAtomic { + ins.OpCode = ins.OpCode &^ atomicMask + ins.Constant = int64(atomic >> 8) + } + + op, err := ins.OpCode.bpfOpCode() + if err != nil { + return 0, err + } + data := make([]byte, InstructionSize) - data[0] = byte(ins.OpCode) + data[0] = op data[1] = byte(regs) bo.PutUint16(data[2:4], uint16(ins.Offset)) bo.PutUint32(data[4:8], uint32(cons)) @@ -133,31 +217,65 @@ func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) return 2 * InstructionSize, nil } +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. +// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + // RewriteMapPtr changes an instruction to use a new map fd. // // Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. func (ins *Instruction) RewriteMapPtr(fd int) error { - if !ins.OpCode.IsDWordLoad() { - return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) - } - - if ins.Src != PseudoMapFD && ins.Src != PseudoMapValue { + if !ins.IsLoadFromMap() { return errors.New("not a load from a map") } + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { // Preserve the offset value for direct map loads. offset := uint64(ins.Constant) & (math.MaxUint32 << 32) rawFd := uint64(uint32(fd)) ins.Constant = int64(offset | rawFd) - return nil } // MapPtr returns the map fd for this instruction. // // The result is undefined if the instruction is not a load from a map, // see IsLoadFromMap. +// +// Deprecated: use Map() instead. func (ins *Instruction) MapPtr() int { - return int(int32(uint64(ins.Constant) & math.MaxUint32)) + // If there is a map associated with the instruction, return its FD. 
+ if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) } // RewriteMapOffset changes the offset of a direct load from a map. @@ -195,6 +313,13 @@ func (ins *Instruction) IsFunctionCall() bool { return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall } +// IsKfuncCall returns true if the instruction calls a kfunc. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsKfuncCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall +} + // IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. func (ins *Instruction) IsLoadOfFunctionPointer() bool { return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc @@ -239,21 +364,30 @@ func (ins Instruction) Format(f fmt.State, c rune) { } if ins.IsLoadFromMap() { - fd := ins.MapPtr() + fd := ins.mapFd() + m := ins.Map() switch ins.Src { case PseudoMapFD: - fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } case PseudoMapValue: - fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } } goto ref } - fmt.Fprintf(f, "%v ", op) switch cls := op.Class(); { case cls.isLoadOrStore(): + fmt.Fprintf(f, "%v ", op) switch op.Mode() { case ImmMode: fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) @@ -261,30 +395,50 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "imm: %d", ins.Constant) case IndMode: fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) - case MemMode: + case MemMode, MemSXMode: fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) - case XAddMode: - fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + case AtomicMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d", ins.Dst, ins.Src, ins.Offset) } case cls.IsALU(): - fmt.Fprintf(f, "dst: %s ", ins.Dst) - if op.ALUOp() == Swap || op.Source() == ImmSource { + fmt.Fprintf(f, "%v", op) + if op == Swap.Op(ImmSource) { + fmt.Fprintf(f, "%d", ins.Constant) + } + + fmt.Fprintf(f, " dst: %s ", ins.Dst) + switch { + case op.ALUOp() == Swap: + break + case op.Source() == ImmSource: fmt.Fprintf(f, "imm: %d", ins.Constant) - } else { + default: fmt.Fprintf(f, "src: %s", ins.Src) } case cls.IsJump(): + fmt.Fprintf(f, "%v ", op) switch jop := op.JumpOp(); jop { case Call: - if ins.Src == PseudoCall { + switch ins.Src { + case PseudoCall: // bpf-to-bpf call fmt.Fprint(f, ins.Constant) - } else { + case PseudoKfuncCall: + // kfunc call + fmt.Fprintf(f, "Kfunc(%d)", ins.Constant) + default: fmt.Fprint(f, BuiltinFunc(ins.Constant)) } + case Ja: + if ins.OpCode.Class() == Jump32Class { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "off: %d", ins.Offset) + } + default: fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) if op.Source() == ImmSource { @@ -293,45 +447,136 @@ func (ins Instruction) Format(f fmt.State, c rune) { fmt.Fprintf(f, "src: %s", 
ins.Src) } } + default: + fmt.Fprintf(f, "%v ", op) } ref: - if ins.Reference != "" { - fmt.Fprintf(f, " <%s>", ins.Reference) + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) } } +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + // Size returns the amount of bytes ins would occupy in binary form. func (ins Instruction) Size() uint64 { return uint64(InstructionSize * ins.OpCode.rawInstructions()) } +// WithMetadata sets the given Metadata on the Instruction. e.g. to copy +// Metadata from another Instruction when replacing it. +func (ins Instruction) WithMetadata(meta Metadata) Instruction { + ins.Metadata = meta + return ins +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. +func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. it references an existing, +// pinned map that was opened during ELF loading. +func (ins Instruction) Map() FDer { + fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) + return fd +} + +type sourceMeta struct{} + +// WithSource adds source information about the Instruction. +func (ins Instruction) WithSource(src fmt.Stringer) Instruction { + ins.Metadata.Set(sourceMeta{}, src) + return ins +} + +// Source returns source information about the Instruction. The field is +// present when the compiler emits BTF line info about the Instruction and +// usually contains the line of source code responsible for it. +func (ins Instruction) Source() fmt.Stringer { + str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) + return str +} + +// A Comment can be passed to Instruction.WithSource to add a comment +// to an instruction. +type Comment string + +func (s Comment) String() string { + return string(s) +} + +// FDer represents a resource tied to an underlying file descriptor. +// Used as a stand-in for e.g. ebpf.Map since that type cannot be +// imported here and FD() is the only method we rely on. +type FDer interface { + FD() int +} + // Instructions is an eBPF program. type Instructions []Instruction -// Unmarshal unmarshals an Instructions from a binary instruction stream. -// All instructions in insns are replaced by instructions decoded from r. 
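// Editor-added sketch, not vendored code: the Symbol and Reference struct
// fields removed above now live in Instruction.Metadata. A minimal migration
// example using only helpers visible in this diff; the function name and the
// constant 42 are made up for illustration.
func exampleInstructionMetadata() (symbol, reference string) {
    ins := Instruction{
        OpCode:   LoadImmOp(DWord),
        Dst:      R0,
        Constant: 42,
    }.WithSymbol("start").WithReference("helper_map")

    // Old code read ins.Symbol and ins.Reference directly; the accessors now
    // look the values up in ins.Metadata instead.
    return ins.Symbol(), ins.Reference()
}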
-func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { - if len(*insns) > 0 { - *insns = nil - } - +// AppendInstructions decodes [Instruction] from r and appends them to insns. +func AppendInstructions(insns Instructions, r io.Reader, bo binary.ByteOrder, platform string) (Instructions, error) { var offset uint64 for { var ins Instruction - n, err := ins.Unmarshal(r, bo) + err := ins.Unmarshal(r, bo, platform) if errors.Is(err, io.EOF) { break } if err != nil { - return fmt.Errorf("offset %d: %w", offset, err) + return nil, fmt.Errorf("offset %d: %w", offset, err) } - *insns = append(*insns, ins) - offset += n + insns = append(insns, ins) + offset += ins.Size() } - return nil + return insns, nil } // Name returns the name of the function insns belongs to, if any. @@ -339,7 +584,7 @@ func (insns Instructions) Name() string { if len(insns) == 0 { return "" } - return insns[0].Symbol + return insns[0].Symbol() } func (insns Instructions) String() string { @@ -355,30 +600,66 @@ func (insns Instructions) Size() uint64 { return sum } +// AssociateMap updates all Instructions that Reference the given symbol +// to point to an existing Map m instead. +// +// Returns ErrUnreferencedSymbol error if no references to symbol are found +// in insns. If symbol is anything else than the symbol name of map (e.g. +// a bpf2bpf subprogram), an error is returned. +func (insns Instructions) AssociateMap(symbol string, m FDer) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if err := ins.AssociateMap(m); err != nil { + return err + } + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + // RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. // -// Returns an error if the symbol isn't used, see IsUnreferencedSymbol. +// Returns ErrUnreferencedSymbol if the symbol isn't used. +// +// Deprecated: use AssociateMap instead. func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { if symbol == "" { return errors.New("empty symbol") } - found := false + var found bool for i := range insns { ins := &insns[i] - if ins.Reference != symbol { + if ins.Reference() != symbol { continue } - if err := ins.RewriteMapPtr(fd); err != nil { - return err + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") } + ins.encodeMapFD(fd) + found = true } if !found { - return &unreferencedSymbolError{symbol} + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) } return nil @@ -390,15 +671,15 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) { offsets := make(map[string]int) for i, ins := range insns { - if ins.Symbol == "" { + if ins.Symbol() == "" { continue } - if _, ok := offsets[ins.Symbol]; ok { - return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol) + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) } - offsets[ins.Symbol] = i + offsets[ins.Symbol()] = i } return offsets, nil @@ -406,16 +687,15 @@ func (insns Instructions) SymbolOffsets() (map[string]int, error) { // FunctionReferences returns a set of symbol names these Instructions make // bpf-to-bpf calls to. 
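// Editor-added sketch, not vendored code: AppendInstructions replaces the
// removed Instructions.Unmarshal and takes an explicit platform string so that
// builtin call numbers resolve via BuiltinFuncForPlatform. The function name
// and the "linux" literal are assumptions for illustration; Windows callers
// would pass their platform string instead.
func exampleDecodeInstructions(r io.Reader, bo binary.ByteOrder) (Instructions, error) {
    // A nil starting slice grows as instructions are decoded; decoding stops
    // cleanly at io.EOF.
    return AppendInstructions(nil, r, bo, "linux")
}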
-func (insns Instructions) FunctionReferences() map[string]bool { - calls := make(map[string]bool) - +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) for _, ins := range insns { if ins.Constant != -1 { // BPF-to-BPF calls have -1 constants. continue } - if ins.Reference == "" { + if ins.Reference() == "" { continue } @@ -423,10 +703,16 @@ func (insns Instructions) FunctionReferences() map[string]bool { continue } - calls[ins.Reference] = true + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) } - return calls + sort.Strings(result) + return result } // ReferenceOffsets returns the set of references and their offset in @@ -435,11 +721,11 @@ func (insns Instructions) ReferenceOffsets() map[string][]int { offsets := make(map[string][]int) for i, ins := range insns { - if ins.Reference == "" { + if ins.Reference() == "" { continue } - offsets[ins.Reference] = append(offsets[ins.Reference], i) + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) } return offsets @@ -490,18 +776,36 @@ func (insns Instructions) Format(f fmt.State, c rune) { iter := insns.Iterate() for iter.Next() { - if iter.Ins.Symbol != "" { - fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol) + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } } fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) } } // Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + for i, ins := range insns { - _, err := ins.Marshal(w, bo) - if err != nil { + if _, err := ins.Marshal(w, bo); err != nil { return fmt.Errorf("instruction %d: %w", i, err) } } @@ -524,7 +828,97 @@ func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { return "", fmt.Errorf("instruction %d: %w", i, err) } } - return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil + return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil +} + +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. +// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. 
+ symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. + iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1, + ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. +func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil } // Iterate allows iterating a BPF program while keeping track of @@ -575,17 +969,10 @@ func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, erro } } -type unreferencedSymbolError struct { - symbol string -} - -func (use *unreferencedSymbolError) Error() string { - return fmt.Sprintf("unreferenced symbol %s", use.symbol) -} - // IsUnreferencedSymbol returns true if err was caused by // an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). func IsUnreferencedSymbol(err error) bool { - _, ok := err.(*unreferencedSymbolError) - return ok + return errors.Is(err, ErrUnreferencedSymbol) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/jump.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/jump.go index 199c06940..a14bc4c89 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/jump.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/jump.go @@ -1,16 +1,16 @@ package asm -//go:generate stringer -output jump_string.go -type=JumpOp +//go:generate go tool stringer -output jump_string.go -type=JumpOp // JumpOp affect control flow. // -// msb lsb -// +----+-+---+ -// |OP |s|cls| -// +----+-+---+ +// msb lsb +// +----+-+---+ +// |OP |s|cls| +// +----+-+---+ type JumpOp uint8 -const jumpMask OpCode = aluMask +const jumpMask OpCode = 0xf0 const ( // InvalidJumpOp is returned by getters when invoked @@ -63,71 +63,73 @@ func (op JumpOp) Op(source Source) OpCode { // Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. 
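// Editor-added sketch, not vendored code: jumps built with a label keep
// Offset == -1 and carry the label as a Reference; Marshal resolves them via
// encodeFunctionReferences and fails with ErrUnsatisfiedProgramReference when
// no instruction carries a matching Symbol. JEq, Return, R1, R10, Word and the
// io/encoding/binary imports come from the wider asm package, not this hunk.
func exampleLabelResolution(w io.Writer, bo binary.ByteOrder) error {
    insns := Instructions{
        // Skip the store below when R1 == 0.
        JEq.Imm(R1, 0, "out"),
        StoreImm(R10, -8, 1, Word),
        Return().WithSymbol("out"),
    }
    // The jump offset is patched in during Marshal.
    return insns.Marshal(w, bo)
}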
func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { return Instruction{ - OpCode: op.opCode(JumpClass, ImmSource), - Dst: dst, - Offset: -1, - Constant: int64(value), - Reference: label, - } + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) } // Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. // Requires kernel 5.1. func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { return Instruction{ - OpCode: op.opCode(Jump32Class, ImmSource), - Dst: dst, - Offset: -1, - Constant: int64(value), - Reference: label, - } + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) } // Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. func (op JumpOp) Reg(dst, src Register, label string) Instruction { return Instruction{ - OpCode: op.opCode(JumpClass, RegSource), - Dst: dst, - Src: src, - Offset: -1, - Reference: label, - } + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) } // Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. // Requires kernel 5.1. func (op JumpOp) Reg32(dst, src Register, label string) Instruction { return Instruction{ - OpCode: op.opCode(Jump32Class, RegSource), - Dst: dst, - Src: src, - Offset: -1, - Reference: label, - } + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) } func (op JumpOp) opCode(class Class, source Source) OpCode { - if op == Exit || op == Call || op == Ja { + if op == Exit || op == Call { return InvalidOpCode } return OpCode(class).SetJumpOp(op).SetSource(source) } +// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1]. +func LongJump(label string) Instruction { + return Instruction{ + OpCode: Ja.opCode(Jump32Class, ImmSource), + Constant: -1, + }.WithReference(label) +} + // Label adjusts PC to the address of the label. func (op JumpOp) Label(label string) Instruction { if op == Call { return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(Call), - Src: PseudoCall, - Constant: -1, - Reference: label, - } + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) } return Instruction{ - OpCode: OpCode(JumpClass).SetJumpOp(op), - Offset: -1, - Reference: label, - } + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store.go index 85ed286b0..a32a9b318 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -1,13 +1,15 @@ package asm -//go:generate stringer -output load_store_string.go -type=Mode,Size +import "fmt" + +//go:generate go tool stringer -output load_store_string.go -type=Mode,Size // Mode for load and store operations // -// msb lsb -// +---+--+---+ -// |MDE|sz|cls| -// +---+--+---+ +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ type Mode uint8 const modeMask OpCode = 0xe0 @@ -24,16 +26,127 @@ const ( IndMode Mode = 0x40 // MemMode - load from memory MemMode Mode = 0x60 - // XAddMode - add atomically across processors. 
- XAddMode Mode = 0xc0 + // MemSXMode - load from memory, sign extension + MemSXMode Mode = 0x80 + // AtomicMode - add atomically across processors. + AtomicMode Mode = 0xc0 ) +const atomicMask OpCode = 0x0001_ff00 + +type AtomicOp uint32 + +const ( + InvalidAtomic AtomicOp = 0xffff_ffff + + // AddAtomic - add src to memory address dst atomically + AddAtomic AtomicOp = AtomicOp(Add) << 8 + // FetchAdd - add src to memory address dst atomically, store result in src + FetchAdd AtomicOp = AddAtomic | fetch + // AndAtomic - bitwise AND src with memory address at dst atomically + AndAtomic AtomicOp = AtomicOp(And) << 8 + // FetchAnd - bitwise AND src with memory address at dst atomically, store result in src + FetchAnd AtomicOp = AndAtomic | fetch + // OrAtomic - bitwise OR src with memory address at dst atomically + OrAtomic AtomicOp = AtomicOp(Or) << 8 + // FetchOr - bitwise OR src with memory address at dst atomically, store result in src + FetchOr AtomicOp = OrAtomic | fetch + // XorAtomic - bitwise XOR src with memory address at dst atomically + XorAtomic AtomicOp = AtomicOp(Xor) << 8 + // FetchXor - bitwise XOR src with memory address at dst atomically, store result in src + FetchXor AtomicOp = XorAtomic | fetch + + // Xchg - atomically exchange the old value with the new value + // + // src gets populated with the old value of *(size *)(dst + offset). + Xchg AtomicOp = 0x0000_e000 | fetch + // CmpXchg - atomically compare and exchange the old value with the new value + // + // Compares R0 and *(size *)(dst + offset), writes src to *(size *)(dst + offset) on match. + // R0 gets populated with the old value of *(size *)(dst + offset), even if no exchange occurs. + CmpXchg AtomicOp = 0x0000_f000 | fetch + + // fetch modifier for copy-modify-write atomics + fetch AtomicOp = 0x0000_0100 + // loadAcquire - atomically load with acquire semantics + loadAcquire AtomicOp = 0x0001_0000 + // storeRelease - atomically store with release semantics + storeRelease AtomicOp = 0x0001_1000 +) + +func (op AtomicOp) String() string { + var name string + switch op { + case AddAtomic, AndAtomic, OrAtomic, XorAtomic: + name = ALUOp(op >> 8).String() + case FetchAdd, FetchAnd, FetchOr, FetchXor: + name = "Fetch" + ALUOp((op^fetch)>>8).String() + case Xchg: + name = "Xchg" + case CmpXchg: + name = "CmpXchg" + case loadAcquire: + name = "LdAcq" + case storeRelease: + name = "StRel" + default: + name = fmt.Sprintf("AtomicOp(%#x)", uint32(op)) + } + + return name +} + +func (op AtomicOp) OpCode(size Size) OpCode { + switch op { + case AddAtomic, AndAtomic, OrAtomic, XorAtomic, + FetchAdd, FetchAnd, FetchOr, FetchXor, + Xchg, CmpXchg: + switch size { + case Byte, Half: + // 8-bit and 16-bit atomic copy-modify-write atomics are not supported + return InvalidOpCode + } + } + + return OpCode(StXClass).SetMode(AtomicMode).SetSize(size).SetAtomicOp(op) +} + +// Mem emits `*(size *)(dst + offset) (op) src`. +func (op AtomicOp) Mem(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: op.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Emits `lock-acquire dst = *(size *)(src + offset)`. +func LoadAcquire(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: loadAcquire.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// Emits `lock-release *(size *)(dst + offset) = src`. 
+func StoreRelease(dst, src Register, size Size, offset int16) Instruction { + return Instruction{ + OpCode: storeRelease.OpCode(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + // Size of load and store operations // -// msb lsb -// +---+--+---+ -// |mde|SZ|cls| -// +---+--+---+ +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ type Size uint8 const sizeMask OpCode = 0x18 @@ -73,6 +186,11 @@ func LoadMemOp(size Size) OpCode { return OpCode(LdXClass).SetMode(MemMode).SetSize(size) } +// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended. +func LoadMemSXOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size) +} + // LoadMem emits `dst = *(size *)(src + offset)`. func LoadMem(dst, src Register, offset int16, size Size) Instruction { return Instruction{ @@ -83,6 +201,20 @@ func LoadMem(dst, src Register, offset int16, size Size) Instruction { } } +// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst. +func LoadMemSX(dst, src Register, offset int16, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadMemSXOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + // LoadImmOp returns the OpCode to load an immediate of given size. // // As of kernel 4.20, only DWord size is accepted. @@ -181,6 +313,10 @@ func StoreImmOp(size Size) OpCode { // StoreImm emits `*(size *)(dst + offset) = value`. func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + return Instruction{ OpCode: StoreImmOp(size), Dst: dst, @@ -191,14 +327,10 @@ func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { // StoreXAddOp returns the OpCode to atomically add a register to a value in memory. func StoreXAddOp(size Size) OpCode { - return OpCode(StXClass).SetMode(XAddMode).SetSize(size) + return AddAtomic.OpCode(size) } // StoreXAdd atomically adds src to *dst. 
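// Editor-added sketch, not vendored code: the AtomicMode additions replace the
// old XAddMode special case, so StoreXAdd below reduces to
// AddAtomic.Mem(dst, src, size, 0). A few of the new variants; registers and
// sizes come from the wider package and the function name is illustrative.
func exampleAtomics() Instructions {
    return Instructions{
        // *(u64 *)(R1 + 0) += R2
        AddAtomic.Mem(R1, R2, DWord, 0),
        // Same, but R2 additionally receives the previous value.
        FetchAdd.Mem(R1, R2, DWord, 0),
        // R2 = load-acquire *(u32 *)(R1 + 8)
        LoadAcquire(R2, R1, Word, 8),
        // store-release *(u32 *)(R1 + 8) = R2
        StoreRelease(R1, R2, Word, 8),
    }
}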
func StoreXAdd(dst, src Register, size Size) Instruction { - return Instruction{ - OpCode: StoreXAddOp(size), - Dst: dst, - Src: src, - } + return AddAtomic.Mem(dst, src, size, 0) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store_string.go index 76d29a075..bbed58b66 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store_string.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -13,7 +13,8 @@ func _() { _ = x[AbsMode-32] _ = x[IndMode-64] _ = x[MemMode-96] - _ = x[XAddMode-192] + _ = x[MemSXMode-128] + _ = x[AtomicMode-192] } const ( @@ -21,8 +22,9 @@ const ( _Mode_name_1 = "AbsMode" _Mode_name_2 = "IndMode" _Mode_name_3 = "MemMode" - _Mode_name_4 = "XAddMode" - _Mode_name_5 = "InvalidMode" + _Mode_name_4 = "MemSXMode" + _Mode_name_5 = "AtomicMode" + _Mode_name_6 = "InvalidMode" ) func (i Mode) String() string { @@ -35,10 +37,12 @@ func (i Mode) String() string { return _Mode_name_2 case i == 96: return _Mode_name_3 - case i == 192: + case i == 128: return _Mode_name_4 - case i == 255: + case i == 192: return _Mode_name_5 + case i == 255: + return _Mode_name_6 default: return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/metadata.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 000000000..dd368a936 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. +type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. +// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. +func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. 
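// Editor-added sketch, not vendored code: Metadata keys are compared with ==,
// so the pattern used elsewhere in this package (symbolMeta, referenceMeta,
// mapMeta) is one zero-sized unexported struct per key. Set copies only the
// elements it must modify, so other copies of a Metadata value keep their
// original view. exampleKey and exampleMetadata are hypothetical names.
type exampleKey struct{}

func exampleMetadata() {
    var a Metadata
    a.Set(exampleKey{}, "hello")

    b := a                   // b shares a's element list
    b.Set(exampleKey{}, nil) // removes the key from b only

    _ = a.Get(exampleKey{}) // still "hello"
    _ = b.Get(exampleKey{}) // nil
}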
+func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode.go index f6d8e0668..9b2f80f0a 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -5,35 +5,36 @@ import ( "strings" ) -//go:generate stringer -output opcode_string.go -type=Class +//go:generate go tool stringer -output opcode_string.go -type=Class // Class of operations // -// msb lsb -// +---+--+---+ -// | ?? |CLS| -// +---+--+---+ +// msb lsb +// +---+--+---+ +// | ?? |CLS| +// +---+--+---+ type Class uint8 const classMask OpCode = 0x07 const ( - // LdClass load memory + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. LdClass Class = 0x00 - // LdXClass load memory from constant + // LdXClass loads memory into registers. LdXClass Class = 0x01 - // StClass load register from memory + // StClass stores immediate values to memory. StClass Class = 0x02 - // StXClass load register from constant + // StXClass stores registers to memory. StXClass Class = 0x03 - // ALUClass arithmetic operators + // ALUClass describes arithmetic operators. ALUClass Class = 0x04 - // JumpClass jump operators + // JumpClass describes jump operators. JumpClass Class = 0x05 - // Jump32Class jump operators with 32 bit comparaisons - // Requires kernel 5.1 + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. Jump32Class Class = 0x06 - // ALU64Class arithmetic in 64 bit mode + // ALU64Class describes arithmetic operators in 64-bit mode. ALU64Class Class = 0x07 ) @@ -65,18 +66,48 @@ func (cls Class) isJumpOrALU() bool { return cls.IsJump() || cls.IsALU() } -// OpCode is a packed eBPF opcode. +// OpCode represents a single operation. +// It is not a 1:1 mapping to real eBPF opcodes. // -// Its encoding is defined by a Class value: +// The encoding varies based on a 3-bit Class: // -// msb lsb -// +----+-+---+ -// | ???? |CLS| -// +----+-+---+ -type OpCode uint8 +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// ??? | CLS +// +// For ALUClass and ALUCLass32: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +// +// For LdClass, LdXclass, StClass and StXClass: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | MDE |SIZ| CLS +// +// For StXClass where MDE == AtomicMode: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | ATOMIC OP | MDE |SIZ| CLS +// +// For JumpClass, Jump32Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +type OpCode uint32 // InvalidOpCode is returned by setters on OpCode -const InvalidOpCode OpCode = 0xff +const InvalidOpCode OpCode = 0xffff + +// bpfOpCode returns the actual BPF opcode. +func (op OpCode) bpfOpCode() (byte, error) { + const opCodeMask = 0xff + + if !valid(op, opCodeMask) { + return 0, fmt.Errorf("invalid opcode %x", op) + } + + return byte(op & opCodeMask), nil +} // rawInstructions returns the number of BPF instructions required // to encode this opcode. @@ -112,6 +143,14 @@ func (op OpCode) Size() Size { return Size(op & sizeMask) } +// AtomicOp returns the type of atomic operation. 
+func (op OpCode) AtomicOp() AtomicOp { + if op.Class() != StXClass || op.Mode() != AtomicMode { + return InvalidAtomic + } + return AtomicOp(op & atomicMask) +} + // Source returns the source for branch and ALU operations. func (op OpCode) Source() Source { if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { @@ -146,7 +185,7 @@ func (op OpCode) JumpOp() JumpOp { jumpOp := JumpOp(op & jumpMask) // Some JumpOps are only supported by JumpClass, not Jump32Class. - if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) { + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) { return InvalidJumpOp } @@ -173,6 +212,13 @@ func (op OpCode) SetSize(size Size) OpCode { return (op & ^sizeMask) | OpCode(size) } +func (op OpCode) SetAtomicOp(atomic AtomicOp) OpCode { + if op.Class() != StXClass || op.Mode() != AtomicMode || !valid(OpCode(atomic), atomicMask) { + return InvalidOpCode + } + return (op & ^atomicMask) | OpCode(atomic) +} + // SetSource sets the source on jump and ALU operations. // // Returns InvalidOpCode if op is of the wrong class. @@ -221,6 +267,10 @@ func (op OpCode) String() string { mode := op.Mode() f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + if atomic := op.AtomicOp(); atomic != InvalidAtomic { + f.WriteString(strings.TrimSuffix(atomic.String(), "Atomic")) + } + switch op.Size() { case DWord: f.WriteString("DW") @@ -233,17 +283,24 @@ func (op OpCode) String() string { } case class.IsALU(): + if op.ALUOp() == Swap && op.Class() == ALU64Class { + // B to make BSwap, uncontitional byte swap + f.WriteString("B") + } + f.WriteString(op.ALUOp().String()) if op.ALUOp() == Swap { - // Width for Endian is controlled by Constant - f.WriteString(op.Endianness().String()) + if op.Class() == ALUClass { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } } else { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + if class == ALUClass { f.WriteString("32") } - - f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } case class.IsJump(): @@ -253,7 +310,7 @@ func (op OpCode) String() string { f.WriteString("32") } - if jop := op.JumpOp(); jop != Exit && jop != Call { + if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja { f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode_string.go index 58bc3e7e7..07825e0dd 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode_string.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -23,8 +23,9 @@ const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassA var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} func (i Class) String() string { - if i >= Class(len(_Class_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_Class_index)-1 { return "Class(" + strconv.FormatInt(int64(i), 10) + ")" } - return _Class_name[_Class_index[i]:_Class_index[i+1]] + return _Class_name[_Class_index[idx]:_Class_index[idx+1]] } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/register.go b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/register.go index dd5d44f1c..457a3b8a8 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/asm/register.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/asm/register.go @@ -35,10 +35,11 @@ const ( // Pseudo registers used by 64bit loads and jumps const ( - PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD 
- PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE - PseudoCall = R1 // BPF_PSEUDO_CALL - PseudoFunc = R4 // BPF_PSEUDO_FUNC + PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD + PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE + PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC + PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL ) func (r Register) String() string { diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/attachtype_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/attachtype_string.go index de355ed90..efed516b6 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/attachtype_string.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -51,15 +51,50 @@ func _() { _ = x[AttachSkReuseportSelect-39] _ = x[AttachSkReuseportSelectOrMigrate-40] _ = x[AttachPerfEvent-41] + _ = x[AttachTraceKprobeMulti-42] + _ = x[AttachTraceKprobeSession-56] + _ = x[AttachLSMCgroup-43] + _ = x[AttachStructOps-44] + _ = x[AttachNetfilter-45] + _ = x[AttachTCXIngress-46] + _ = x[AttachTCXEgress-47] + _ = x[AttachTraceUprobeMulti-48] + _ = x[AttachCgroupUnixConnect-49] + _ = x[AttachCgroupUnixSendmsg-50] + _ = x[AttachCgroupUnixRecvmsg-51] + _ = x[AttachCgroupUnixGetpeername-52] + _ = x[AttachCgroupUnixGetsockname-53] + _ = x[AttachNetkitPrimary-54] + _ = x[AttachNetkitPeer-55] + _ = x[AttachWindowsXDP-268435457] + _ = x[AttachWindowsBind-268435458] + _ = x[AttachWindowsCGroupInet4Connect-268435459] + _ = x[AttachWindowsCGroupInet6Connect-268435460] + _ = x[AttachWindowsCgroupInet4RecvAccept-268435461] + _ = x[AttachWindowsCgroupInet6RecvAccept-268435462] + _ = x[AttachWindowsCGroupSockOps-268435463] + _ = x[AttachWindowsSample-268435464] + _ = x[AttachWindowsXDPTest-268435465] } -const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEvent" +const ( + _AttachType_name_0 = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeerTraceKprobeSession" + _AttachType_name_1 = "WindowsXDPWindowsBindWindowsCGroupInet4ConnectWindowsCGroupInet6ConnectWindowsCgroupInet4RecvAcceptWindowsCgroupInet6RecvAcceptWindowsCGroupSockOpsWindowsSampleWindowsXDPTest" +) -var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 
350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610} +var ( + _AttachType_index_0 = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804, 822} + _AttachType_index_1 = [...]uint8{0, 10, 21, 46, 71, 99, 127, 147, 160, 174} +) func (i AttachType) String() string { - if i >= AttachType(len(_AttachType_index)-1) { + switch { + case i <= 56: + return _AttachType_name_0[_AttachType_index_0[i]:_AttachType_index_0[i+1]] + case 268435457 <= i && i <= 268435465: + i -= 268435457 + return _AttachType_name_1[_AttachType_index_1[i]:_AttachType_index_1[i+1]] + default: return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 000000000..41e1f8a6f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,550 @@ +package btf + +import ( + "debug/elf" + "errors" + "fmt" + "io" + "iter" + "maps" + "math" + "os" + "reflect" + "slices" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. +var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") + ErrMultipleMatches = errors.New("multiple matching types") +) + +// ID represents the unique ID of a BTF object. +type ID = sys.BTFID + +type elfData struct { + sectionSizes map[string]uint32 + symbolOffsets map[elfSymbol]uint32 + fixups map[Type]bool +} + +type elfSymbol struct { + section string + name string +} + +// Spec allows querying a set of Types and loading the set into the +// kernel. +type Spec struct { + *decoder + + // Additional data from ELF, may be nil. + elf *elfData +} + +// LoadSpec opens file and calls LoadSpecFromReader on it. +func LoadSpec(file string) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSpecFromReader(fh) +} + +// LoadSpecFromReader reads from an ELF or a raw BTF blob. +// +// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos +// may be nil. +func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + raw, err := io.ReadAll(io.NewSectionReader(rd, 0, math.MaxInt64)) + if err != nil { + return nil, fmt.Errorf("read raw BTF: %w", err) + } + + return loadRawSpec(raw, nil) + } + + return loadSpecFromELF(file) +} + +// LoadSpecAndExtInfosFromReader reads from an ELF. +// +// ExtInfos may be nil if the ELF doesn't contain section metadata. +// Returns ErrNotFound if the ELF contains no BTF. 
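// Editor-added sketch, not vendored code: LoadSpec and LoadSpecFromReader
// accept either an ELF carrying a .BTF section or a raw BTF blob, and wrap
// ErrNotFound when an ELF has no BTF. The object path below is hypothetical.
func exampleLoadSpec() (*Spec, error) {
    spec, err := LoadSpec("testdata/program.o")
    if errors.Is(err, ErrNotFound) {
        // The ELF exists but was built without BTF.
        return nil, fmt.Errorf("object has no .BTF section: %w", err)
    }
    return spec, err
}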
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, nil, err + } + + spec, err := loadSpecFromELF(file) + if err != nil { + return nil, nil, err + } + + extInfos, err := loadExtInfosFromELF(file, spec) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + return spec, extInfos, nil +} + +// symbolOffsets extracts all symbols offsets from an ELF and indexes them by +// section and variable name. +// +// References to variables in BTF data sections carry unsigned 32-bit offsets. +// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well +// beyond this range. Since these symbols cannot be described by BTF info, +// ignore them here. +func symbolOffsets(file *internal.SafeELFFile) (map[elfSymbol]uint32, error) { + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + offsets := make(map[elfSymbol]uint32) + for _, sym := range symbols { + if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if sym.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. + continue + } + + if int(sym.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section) + } + + secName := file.Sections[sym.Section].Name + offsets[elfSymbol{secName, sym.Name}] = uint32(sym.Value) + } + + return offsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + offsets, err := symbolOffsets(file) + if err != nil { + return nil, err + } + + rawBTF, err := btfSection.Data() + if err != nil { + return nil, fmt.Errorf("reading .BTF section: %w", err) + } + + spec, err := loadRawSpec(rawBTF, nil) + if err != nil { + return nil, err + } + + if spec.decoder.byteOrder != file.ByteOrder { + return nil, fmt.Errorf("BTF byte order %s does not match ELF byte order %s", spec.decoder.byteOrder, file.ByteOrder) + } + + spec.elf = &elfData{ + sectionSizes, + offsets, + make(map[Type]bool), + } + + return spec, nil +} + +func loadRawSpec(btf []byte, base *Spec) (*Spec, error) { + var ( + baseDecoder *decoder + baseStrings *stringTable + err error + ) + + if base != nil { + baseDecoder = base.decoder + baseStrings = base.strings + } + + header, bo, err := parseBTFHeader(btf) + if err != nil { + return nil, fmt.Errorf("parsing .BTF header: %v", err) + } + + if header.HdrLen > uint32(len(btf)) { + return nil, fmt.Errorf("BTF header length is out of bounds") + } + btf = btf[header.HdrLen:] + + if int(header.StringOff+header.StringLen) > len(btf) { + return nil, fmt.Errorf("string table is out of bounds") + } + stringsSection := btf[header.StringOff : header.StringOff+header.StringLen] + + rawStrings, err := newStringTable(stringsSection, baseStrings) + if err != nil { + return nil, fmt.Errorf("read string section: %w", err) + } + + if 
int(header.TypeOff+header.TypeLen) > len(btf) { + return nil, fmt.Errorf("types section is out of bounds") + } + typesSection := btf[header.TypeOff : header.TypeOff+header.TypeLen] + + decoder, err := newDecoder(typesSection, bo, rawStrings, baseDecoder) + if err != nil { + return nil, err + } + + return &Spec{decoder, nil}, nil +} + +// fixupDatasec attempts to patch up missing info in Datasecs and its members by +// supplementing them with information from the ELF headers and symbol table. +func (elf *elfData) fixupDatasec(typ Type) error { + if elf == nil { + return nil + } + + if ds, ok := typ.(*Datasec); ok { + if elf.fixups[ds] { + return nil + } + elf.fixups[ds] = true + + name := ds.Name + + // Some Datasecs are virtual and don't have corresponding ELF sections. + switch name { + case ".ksyms": + // .ksyms describes forward declarations of kfunc signatures, as well as + // references to kernel symbols. + // Nothing to fix up, all sizes and offsets are 0. + for _, vsi := range ds.Vars { + switch t := vsi.Type.(type) { + case *Func: + continue + case *Var: + if _, ok := t.Type.(*Void); !ok { + return fmt.Errorf("data section %s: expected %s to be *Void, not %T: %w", name, vsi.Type.TypeName(), vsi.Type, ErrNotSupported) + } + default: + return fmt.Errorf("data section %s: expected to be either *btf.Func or *btf.Var, not %T: %w", name, vsi.Type, ErrNotSupported) + } + } + + return nil + case ".kconfig": + // .kconfig has a size of 0 and has all members' offsets set to 0. + // Fix up all offsets and set the Datasec's size. + if err := fixupDatasecLayout(ds); err != nil { + return err + } + + // Fix up extern to global linkage to avoid a BTF verifier error. + for _, vsi := range ds.Vars { + vsi.Type.(*Var).Linkage = GlobalVar + } + + return nil + } + + if ds.Size != 0 { + return nil + } + + ds.Size, ok = elf.sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + for i := range ds.Vars { + symName := ds.Vars[i].Type.TypeName() + ds.Vars[i].Offset, ok = elf.symbolOffsets[elfSymbol{name, symName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName) + } + } + } + + return nil +} + +// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and +// alignment. Calculate and set ds.Size. +func fixupDatasecLayout(ds *Datasec) error { + var off uint32 + + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type) + } + + size, err := Sizeof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting size: %w", v.Name, err) + } + align, err := alignof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err) + } + + // Align the current member based on the offset of the end of the previous + // member and the alignment of the current member. + off = internal.Align(off, uint32(align)) + + ds.Vars[i].Offset = off + + off += uint32(size) + } + + ds.Size = off + + return nil +} + +// Copy a Spec. +// +// All contained types are duplicated while preserving any modifications made +// to them. +func (s *Spec) Copy() *Spec { + if s == nil { + return nil + } + + cpy := &Spec{ + s.decoder.Copy(), + nil, + } + + if s.elf != nil { + cpy.elf = &elfData{ + s.elf.sectionSizes, + s.elf.symbolOffsets, + maps.Clone(s.elf.fixups), + } + } + + return cpy +} + +// TypeByID returns the BTF Type with the given type ID. 
+// +// Returns an error wrapping ErrNotFound if a Type with the given ID +// does not exist in the Spec. +func (s *Spec) TypeByID(id TypeID) (Type, error) { + typ, err := s.decoder.TypeByID(id) + if err != nil { + return nil, fmt.Errorf("inflate type: %w", err) + } + + if err := s.elf.fixupDatasec(typ); err != nil { + return nil, err + } + + return typ, nil +} + +// TypeID returns the ID for a given Type. +// +// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec. +func (s *Spec) TypeID(typ Type) (TypeID, error) { + return s.decoder.TypeID(typ) +} + +// AnyTypesByName returns a list of BTF Types with the given name. +// +// If the BTF blob describes multiple compilation units like vmlinux, multiple +// Types with the same name and kind can exist, but might not describe the same +// data structure. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +func (s *Spec) AnyTypesByName(name string) ([]Type, error) { + types, err := s.TypesByName(newEssentialName(name)) + if err != nil { + return nil, err + } + + for i := 0; i < len(types); i++ { + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if types[i].TypeName() != name { + types = slices.Delete(types, i, i+1) + continue + } + + if err := s.elf.fixupDatasec(types[i]); err != nil { + return nil, err + } + } + + return types, nil +} + +// AnyTypeByName returns a Type with the given name. +// +// Returns an error if multiple types of that name exist. +func (s *Spec) AnyTypeByName(name string) (Type, error) { + types, err := s.AnyTypesByName(name) + if err != nil { + return nil, err + } + + if len(types) > 1 { + return nil, fmt.Errorf("found multiple types: %v", types) + } + + return types[0], nil +} + +// TypeByName searches for a Type with a specific name. Since multiple Types +// with the same name can exist, the parameter typ is taken to narrow down the +// search in case of a clash. +// +// typ must be a non-nil pointer to an implementation of a Type. On success, the +// address of the found Type will be copied to typ. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +// Returns an error wrapping ErrMultipleTypes if multiple candidates are found. +func (s *Spec) TypeByName(name string, typ interface{}) error { + typeInterface := reflect.TypeOf((*Type)(nil)).Elem() + + // typ may be **T or *Type + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + + wanted := typPtr.Type() + if wanted == typeInterface { + // This is *Type. Unwrap the value's type. + wanted = typPtr.Elem().Type() + } + + if !wanted.AssignableTo(typeInterface) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpec loads split BTF from the given file. +// +// Types from base are used to resolve references in the split BTF. 
+// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpec(file string, base *Spec) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSplitSpecFromReader(fh, base) +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + raw, err := io.ReadAll(io.NewSectionReader(r, 0, math.MaxInt64)) + if err != nil { + return nil, fmt.Errorf("read raw BTF: %w", err) + } + + return loadRawSpec(raw, base) +} + +// All iterates over all types. +func (s *Spec) All() iter.Seq2[Type, error] { + return func(yield func(Type, error) bool) { + for id := s.firstTypeID; ; id++ { + typ, err := s.TypeByID(id) + if errors.Is(err, ErrNotFound) { + return + } else if err != nil { + yield(nil, err) + return + } + + // Skip declTags, during unmarshaling declTags become `Tags` fields of other types. + // We keep them in the spec to avoid holes in the ID space, but for the purposes of + // iteration, they are not useful to the user. + if _, ok := typ.(*declTag); ok { + continue + } + + if !yield(typ, nil) { + return + } + } + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf_types.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf_types.go new file mode 100644 index 000000000..c957f5970 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -0,0 +1,512 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "unsafe" +) + +//go:generate go tool stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind + +// btfKind describes a Type. +type btfKind uint8 + +// Equivalents of the BTF_KIND_* constants. +const ( + kindUnknown btfKind = iota // Unknown + kindInt // Int + kindPointer // Pointer + kindArray // Array + kindStruct // Struct + kindUnion // Union + kindEnum // Enum + kindForward // Forward + kindTypedef // Typedef + kindVolatile // Volatile + kindConst // Const + kindRestrict // Restrict + // Added ~4.20 + kindFunc // Func + kindFuncProto // FuncProto + // Added ~5.1 + kindVar // Var + kindDatasec // Datasec + // Added ~5.13 + kindFloat // Float + // Added 5.16 + kindDeclTag // DeclTag + // Added 5.17 + kindTypeTag // TypeTag + // Added 6.0 + kindEnum64 // Enum64 +) + +// FuncLinkage describes BTF function linkage metadata. +type FuncLinkage int + +// Equivalent of enum btf_func_linkage. +const ( + StaticFunc FuncLinkage = iota // static + GlobalFunc // global + ExternFunc // extern +) + +// VarLinkage describes BTF variable linkage metadata. +type VarLinkage int + +const ( + StaticVar VarLinkage = iota // static + GlobalVar // global + ExternVar // extern +) + +const ( + btfTypeKindShift = 24 + btfTypeKindLen = 5 + btfTypeVlenShift = 0 + btfTypeVlenMask = 16 + btfTypeKindFlagShift = 31 + btfTypeKindFlagMask = 1 +) + +var btfHeaderLen = binary.Size(&btfHeader{}) + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// parseBTFHeader parses the header of the .BTF section. 
+func parseBTFHeader(buf []byte) (*btfHeader, binary.ByteOrder, error) { + var header btfHeader + var bo binary.ByteOrder + for _, order := range []binary.ByteOrder{binary.LittleEndian, binary.BigEndian} { + n, err := binary.Decode(buf, order, &header) + if err != nil { + return nil, nil, fmt.Errorf("read header: %v", err) + } + + if header.Magic != btfMagic { + continue + } + + buf = buf[n:] + bo = order + break + } + + if bo == nil { + return nil, nil, fmt.Errorf("no valid BTF header") + } + + if header.Version != 1 { + return nil, nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, nil, errors.New("header length shorter than btfHeader size") + } + + for _, b := range buf[:remainder] { + if b != 0 { + return nil, nil, errors.New("header contains non-zero trailer") + } + } + + return &header, bo, nil +} + +// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. +type btfType struct { + NameOff uint32 + /* "info" bits arrangement + * bits 0-15: vlen (e.g. # of struct's members), linkage + * bits 16-23: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + Info uint32 + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. + */ + SizeType uint32 +} + +var btfTypeSize = int(unsafe.Sizeof(btfType{})) + +func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to unmarshal btfType") + } + + bt.NameOff = bo.Uint32(b[0:]) + bt.Info = bo.Uint32(b[4:]) + bt.SizeType = bo.Uint32(b[8:]) + return btfTypeSize, nil +} + +func mask(len uint32) uint32 { + return (1 << len) - 1 +} + +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + +func (bt *btfType) info(len, shift uint32) uint32 { + return readBits(bt.Info, len, shift) +} + +func (bt *btfType) setInfo(value, len, shift uint32) { + bt.Info = writeBits(bt.Info, len, shift, value) +} + +func (bt *btfType) Kind() btfKind { + return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) +} + +func (bt *btfType) SetKind(kind btfKind) { + bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) +} + +func (bt *btfType) Vlen() int { + return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetVlen(vlen int) { + bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) kindFlagBool() bool { + return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 +} + +func (bt *btfType) setKindFlagBool(set bool) { + var value uint32 + if set { + value = 1 + } + bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +// Bitfield returns true if the struct or union contain a bitfield. 
+func (bt *btfType) Bitfield() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetBitfield(isBitfield bool) { + bt.setKindFlagBool(isBitfield) +} + +func (bt *btfType) FwdKind() FwdKind { + return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift)) +} + +func (bt *btfType) SetFwdKind(kind FwdKind) { + bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +func (bt *btfType) Signed() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetSigned(signed bool) { + bt.setKindFlagBool(signed) +} + +func (bt *btfType) Linkage() FuncLinkage { + return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetLinkage(linkage FuncLinkage) { + bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) Type() TypeID { + // TODO: Panic here if wrong kind? + return TypeID(bt.SizeType) +} + +func (bt *btfType) SetType(id TypeID) { + bt.SizeType = uint32(id) +} + +func (bt *btfType) Size() uint32 { + // TODO: Panic here if wrong kind? + return bt.SizeType +} + +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + +func (bt *btfType) Encode(buf []byte, bo binary.ByteOrder) (int, error) { + if len(buf) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to marshal btfType") + } + bo.PutUint32(buf[0:], bt.NameOff) + bo.PutUint32(buf[4:], bt.Info) + bo.PutUint32(buf[8:], bt.SizeType) + return btfTypeSize, nil +} + +// DataLen returns the length of additional type specific data in bytes. +func (bt *btfType) DataLen() (int, error) { + switch bt.Kind() { + case kindInt: + return int(unsafe.Sizeof(btfInt{})), nil + case kindPointer: + case kindArray: + return int(unsafe.Sizeof(btfArray{})), nil + case kindStruct: + fallthrough + case kindUnion: + return int(unsafe.Sizeof(btfMember{})) * bt.Vlen(), nil + case kindEnum: + return int(unsafe.Sizeof(btfEnum{})) * bt.Vlen(), nil + case kindForward: + case kindTypedef: + case kindVolatile: + case kindConst: + case kindRestrict: + case kindFunc: + case kindFuncProto: + return int(unsafe.Sizeof(btfParam{})) * bt.Vlen(), nil + case kindVar: + return int(unsafe.Sizeof(btfVariable{})), nil + case kindDatasec: + return int(unsafe.Sizeof(btfVarSecinfo{})) * bt.Vlen(), nil + case kindFloat: + case kindDeclTag: + return int(unsafe.Sizeof(btfDeclTag{})), nil + case kindTypeTag: + case kindEnum64: + return int(unsafe.Sizeof(btfEnum64{})) * bt.Vlen(), nil + default: + return 0, fmt.Errorf("unknown kind: %v", bt.Kind()) + } + + return 0, nil +} + +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
+// b = bits (bitfields) +type btfInt struct { + Raw uint32 +} + +const ( + btfIntEncodingLen = 4 + btfIntEncodingShift = 24 + btfIntOffsetLen = 8 + btfIntOffsetShift = 16 + btfIntBitsLen = 8 + btfIntBitsShift = 0 +) + +var btfIntLen = int(unsafe.Sizeof(btfInt{})) + +func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfIntLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfInt") + } + + bi.Raw = bo.Uint32(b[0:]) + return btfIntLen, nil +} + +func (bi btfInt) Encoding() IntEncoding { + return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) +} + +func (bi *btfInt) SetEncoding(e IntEncoding) { + bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) +} + +func (bi btfInt) Offset() Bits { + return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) +} + +func (bi *btfInt) SetOffset(offset uint32) { + bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) +} + +func (bi btfInt) Bits() Bits { + return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) +} + +func (bi *btfInt) SetBits(bits byte) { + bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) +} + +type btfArray struct { + Type TypeID + IndexType TypeID + Nelems uint32 +} + +var btfArrayLen = int(unsafe.Sizeof(btfArray{})) + +func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfArrayLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfArray") + } + + ba.Type = TypeID(bo.Uint32(b[0:])) + ba.IndexType = TypeID(bo.Uint32(b[4:])) + ba.Nelems = bo.Uint32(b[8:]) + return btfArrayLen, nil +} + +type btfMember struct { + NameOff uint32 + Type TypeID + Offset uint32 +} + +var btfMemberLen = int(unsafe.Sizeof(btfMember{})) + +func unmarshalBtfMember(bm *btfMember, b []byte, bo binary.ByteOrder) (int, error) { + if btfMemberLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfMember") + } + + bm.NameOff = bo.Uint32(b[0:]) + bm.Type = TypeID(bo.Uint32(b[4:])) + bm.Offset = bo.Uint32(b[8:]) + return btfMemberLen, nil +} + +type btfVarSecinfo struct { + Type TypeID + Offset uint32 + Size uint32 +} + +var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) + +func unmarshalBtfVarSecInfo(bvsi *btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVarSecinfoLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo") + } + + bvsi.Type = TypeID(bo.Uint32(b[0:])) + bvsi.Offset = bo.Uint32(b[4:]) + bvsi.Size = bo.Uint32(b[8:]) + return btfVarSecinfoLen, nil +} + +type btfVariable struct { + Linkage uint32 +} + +var btfVariableLen = int(unsafe.Sizeof(btfVariable{})) + +func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVariableLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable") + } + + bv.Linkage = bo.Uint32(b[0:]) + return btfVariableLen, nil +} + +type btfEnum struct { + NameOff uint32 + Val uint32 +} + +var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) + +func unmarshalBtfEnum(be *btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + if btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum") + } + + be.NameOff = bo.Uint32(b[0:]) + be.Val = bo.Uint32(b[4:]) + return btfEnumLen, nil +} + +type btfEnum64 struct { + NameOff uint32 + ValLo32 uint32 + ValHi32 uint32 +} + +var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) + +func unmarshalBtfEnum64(enum *btfEnum64, b []byte, bo 
binary.ByteOrder) (int, error) { + if len(b) < btfEnum64Len { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64") + } + + enum.NameOff = bo.Uint32(b[0:]) + enum.ValLo32 = bo.Uint32(b[4:]) + enum.ValHi32 = bo.Uint32(b[8:]) + + return btfEnum64Len, nil +} + +type btfParam struct { + NameOff uint32 + Type TypeID +} + +var btfParamLen = int(unsafe.Sizeof(btfParam{})) + +func unmarshalBtfParam(param *btfParam, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfParamLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam") + } + + param.NameOff = bo.Uint32(b[0:]) + param.Type = TypeID(bo.Uint32(b[4:])) + + return btfParamLen, nil +} + +type btfDeclTag struct { + ComponentIdx uint32 +} + +var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{})) + +func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfDeclTagLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag") + } + + bdt.ComponentIdx = bo.Uint32(b[0:]) + return btfDeclTagLen, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf_types_string.go new file mode 100644 index 000000000..a9d2d82b6 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/btf_types_string.go @@ -0,0 +1,83 @@ +// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT. + +package btf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticFunc-0] + _ = x[GlobalFunc-1] + _ = x[ExternFunc-2] +} + +const _FuncLinkage_name = "staticglobalextern" + +var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i FuncLinkage) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_FuncLinkage_index)-1 { + return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FuncLinkage_name[_FuncLinkage_index[idx]:_FuncLinkage_index[idx+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticVar-0] + _ = x[GlobalVar-1] + _ = x[ExternVar-2] +} + +const _VarLinkage_name = "staticglobalextern" + +var _VarLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i VarLinkage) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_VarLinkage_index)-1 { + return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _VarLinkage_name[_VarLinkage_index[idx]:_VarLinkage_index[idx+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[kindUnknown-0] + _ = x[kindInt-1] + _ = x[kindPointer-2] + _ = x[kindArray-3] + _ = x[kindStruct-4] + _ = x[kindUnion-5] + _ = x[kindEnum-6] + _ = x[kindForward-7] + _ = x[kindTypedef-8] + _ = x[kindVolatile-9] + _ = x[kindConst-10] + _ = x[kindRestrict-11] + _ = x[kindFunc-12] + _ = x[kindFuncProto-13] + _ = x[kindVar-14] + _ = x[kindDatasec-15] + _ = x[kindFloat-16] + _ = x[kindDeclTag-17] + _ = x[kindTypeTag-18] + _ = x[kindEnum64-19] +} + +const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64" + +var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120} + +func (i btfKind) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_btfKind_index)-1 { + return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _btfKind_name[_btfKind_index[idx]:_btfKind_index[idx+1]] +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/core.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/core.go new file mode 100644 index 000000000..f128011dd --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/core.go @@ -0,0 +1,1264 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" +) + +// Code in this file is derived from libbpf, which is available under a BSD +// 2-Clause license. + +// A constant used when CO-RE relocation has to remove instructions. +// +// Taken from libbpf. +const COREBadRelocationSentinel = 0xbad2310 + +// COREFixup is the result of computing a CO-RE relocation for a target. +type COREFixup struct { + kind coreKind + local uint64 + target uint64 + // True if there is no valid fixup. The instruction is replaced with an + // invalid dummy. + poison bool + // True if the validation of the local value should be skipped. Used by + // some kinds of bitfield relocations. + skipLocalValidation bool +} + +func (f *COREFixup) equal(other COREFixup) bool { + return f.local == other.local && f.target == other.target +} + +func (f *COREFixup) String() string { + if f.poison { + return fmt.Sprintf("%s=poison", f.kind) + } + return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) +} + +func (f *COREFixup) Apply(ins *asm.Instruction) error { + if !platform.IsLinux { + return fmt.Errorf("CO-RE fixup: %w", internal.ErrNotSupportedOnOS) + } + + if f.poison { + // Relocation is poisoned, replace the instruction with an invalid one. + if ins.OpCode.IsDWordLoad() { + // Replace a dword load with a invalid dword load to preserve instruction size. + *ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord) + } else { + // Replace all single size instruction with a invalid call instruction. + *ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call() + } + + // Add context to the kernel verifier output. 
+ if source := ins.Source(); source != nil { + *ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source))) + } else { + *ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE")) + } + + return nil + } + + switch class := ins.OpCode.Class(); class { + case asm.LdXClass, asm.StClass, asm.StXClass: + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) + } + + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) + } + + ins.Offset = int16(f.target) + + case asm.LdClass: + if !ins.IsConstantLoad(asm.DWord) { + return fmt.Errorf("not a dword-sized immediate load") + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) + } + + ins.Constant = int64(f.target) + + case asm.ALUClass: + if ins.OpCode.ALUOp() == asm.Swap { + return fmt.Errorf("relocation against swap") + } + + fallthrough + + case asm.ALU64Class: + if src := ins.OpCode.Source(); src != asm.ImmSource { + return fmt.Errorf("invalid source %s", src) + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) + } + + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) + } + + ins.Constant = int64(f.target) + + default: + return fmt.Errorf("invalid class %s", class) + } + + return nil +} + +func (f COREFixup) isNonExistant() bool { + return f.kind.checksForExistence() && f.target == 0 +} + +// coreKind is the type of CO-RE relocation as specified in BPF source code. 
+type coreKind uint32 + +const ( + reloFieldByteOffset coreKind = iota /* field byte offset */ + reloFieldByteSize /* field size in bytes */ + reloFieldExists /* field existence in target kernel */ + reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ + reloFieldLShiftU64 /* bitfield-specific left bitshift */ + reloFieldRShiftU64 /* bitfield-specific right bitshift */ + reloTypeIDLocal /* type ID in local BPF object */ + reloTypeIDTarget /* type ID in target kernel */ + reloTypeExists /* type existence in target kernel */ + reloTypeSize /* type size in bytes */ + reloEnumvalExists /* enum value existence in target kernel */ + reloEnumvalValue /* enum value integer value */ + reloTypeMatches /* type matches kernel type */ +) + +func (k coreKind) checksForExistence() bool { + return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches +} + +func (k coreKind) String() string { + switch k { + case reloFieldByteOffset: + return "byte_off" + case reloFieldByteSize: + return "byte_sz" + case reloFieldExists: + return "field_exists" + case reloFieldSigned: + return "signed" + case reloFieldLShiftU64: + return "lshift_u64" + case reloFieldRShiftU64: + return "rshift_u64" + case reloTypeIDLocal: + return "local_type_id" + case reloTypeIDTarget: + return "target_type_id" + case reloTypeExists: + return "type_exists" + case reloTypeSize: + return "type_size" + case reloEnumvalExists: + return "enumval_exists" + case reloEnumvalValue: + return "enumval_value" + case reloTypeMatches: + return "type_matches" + default: + return fmt.Sprintf("unknown (%d)", k) + } +} + +// CORERelocate calculates changes needed to adjust eBPF instructions for differences +// in types. +// +// targets forms the set of types to relocate against. The first element has to be +// BTF for vmlinux, the following must be types for kernel modules. +// +// resolveLocalTypeID is called for each local type which requires a stable TypeID. +// Calling the function with the same type multiple times must produce the same +// result. It is the callers responsibility to ensure that the relocated instructions +// are loaded with matching BTF. +// +// Returns a list of fixups which can be applied to instructions to make them +// match the target type(s). +// +// Fixups are returned in the order of relos, e.g. fixup[i] is the solution +// for relos[i]. +func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + if len(targets) == 0 { + // Explicitly check for nil here since the argument used to be optional. + return nil, fmt.Errorf("targets must be provided") + } + + // We can't encode type IDs that aren't for vmlinux into instructions at the + // moment. + resolveTargetTypeID := targets[0].TypeID + + for _, target := range targets { + if bo != target.byteOrder { + return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder) + } + } + + type reloGroup struct { + relos []*CORERelocation + // Position of each relocation in relos. + indices []int + } + + // Split relocations into per Type lists. + relosByType := make(map[Type]*reloGroup) + result := make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind == reloTypeIDLocal { + // Filtering out reloTypeIDLocal here makes our lives a lot easier + // down the line, since it doesn't have a target at all. 
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) + } + + id, err := resolveLocalTypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint64(relo.id), + target: uint64(id), + } + continue + } + + group, ok := relosByType[relo.typ] + if !ok { + group = &reloGroup{} + relosByType[relo.typ] = group + } + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) + } + + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { + return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) + } + + essentialName := newEssentialName(localTypeName) + + var targetTypes []Type + for _, target := range targets { + namedTypes, err := target.TypesByName(essentialName) + if errors.Is(err, ErrNotFound) { + continue + } else if err != nil { + return nil, err + } + + targetTypes = append(targetTypes, namedTypes...) + } + + fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("relocate %s: %w", localType, err) + } + + for j, index := range group.indices { + result[index] = fixups[j] + } + } + + return result, nil +} + +var errAmbiguousRelocation = errors.New("ambiguous relocation") +var errImpossibleRelocation = errors.New("impossible relocation") +var errIncompatibleTypes = errors.New("incompatible types") + +// coreCalculateFixups finds the target type that best matches all relocations. +// +// All relos must target the same type. +// +// The best target is determined by scoring: the less poisoning we have to do +// the better the target is. +func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + bestScore := len(relos) + var bestFixups []COREFixup + for _, target := range targets { + score := 0 // lower is better + fixups := make([]COREFixup, 0, len(relos)) + for _, relo := range relos { + fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err) + } + if fixup.poison || fixup.isNonExistant() { + score++ + } + fixups = append(fixups, fixup) + } + + if score > bestScore { + // We have a better target already, ignore this one. + continue + } + + if score < bestScore { + // This is the best target yet, use it. + bestScore = score + bestFixups = fixups + continue + } + + // Some other target has the same score as the current one. Make sure + // the fixups agree with each other. + for i, fixup := range bestFixups { + if !fixup.equal(fixups[i]) { + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) + } + } + } + + if bestFixups == nil { + // Nothing at all matched, probably because there are no suitable + // targets at all. + // + // Poison everything except checksForExistence. 
+ bestFixups = make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } + } + } + + return bestFixups, nil +} + +var errNoSignedness = errors.New("no signedness") + +// coreCalculateFixup calculates the fixup given a relocation and a target type. +func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) { + fixup := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target}, nil + } + fixupWithoutValidation := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil + } + poison := func() (COREFixup, error) { + if relo.kind.checksForExistence() { + return fixup(1, 0) + } + return COREFixup{kind: relo.kind, poison: true}, nil + } + zero := COREFixup{} + + local := relo.typ + + switch relo.kind { + case reloTypeMatches: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := coreTypesMatch(local, target, nil) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + return fixup(1, 1) + + case reloTypeIDTarget, reloTypeSize, reloTypeExists: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := CheckTypeCompatibility(local, target) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloTypeExists: + return fixup(1, 1) + + case reloTypeIDTarget: + targetID, err := resolveTargetTypeID(target) + if errors.Is(err, ErrNotFound) { + // Probably a relocation trying to get the ID + // of a type from a kmod. + return poison() + } + if err != nil { + return zero, err + } + return fixup(uint64(relo.id), uint64(targetID)) + + case reloTypeSize: + localSize, err := Sizeof(local) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(target) + if err != nil { + return zero, err + } + + return fixup(uint64(localSize), uint64(targetSize)) + } + + case reloEnumvalValue, reloEnumvalExists: + localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloEnumvalExists: + return fixup(1, 1) + + case reloEnumvalValue: + return fixup(localValue.Value, targetValue.Value) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned: + if _, ok := As[*Fwd](target); ok { + // We can't relocate fields using a forward declaration, so + // skip it. If a non-forward declaration is present in the BTF + // we'll find it in one of the other iterations. 
+ return poison() + } + + localField, targetField, err := coreFindField(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) { + f.skipLocalValidation = localField.bitfieldSize > 0 + return f, err + } + + switch relo.kind { + case reloFieldExists: + return fixup(1, 1) + + case reloFieldByteOffset: + return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset))) + + case reloFieldByteSize: + localSize, err := Sizeof(localField.Type) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize))) + + case reloFieldLShiftU64: + var target uint64 + if bo == binary.LittleEndian { + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + + target = uint64(64 - targetField.bitfieldOffset - targetSize) + } else { + loadWidth, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + + target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) + } + return fixupWithoutValidation(0, target) + + case reloFieldRShiftU64: + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + + return fixupWithoutValidation(0, uint64(64-targetSize)) + + case reloFieldSigned: + switch local := UnderlyingType(localField.Type).(type) { + case *Enum: + target, ok := As[*Enum](targetField.Type) + if !ok { + return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type) + } + + return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed)) + case *Int: + target, ok := As[*Int](targetField.Type) + if !ok { + return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type) + } + + return fixup( + uint64(local.Encoding&Signed), + uint64(target.Encoding&Signed), + ) + default: + return zero, fmt.Errorf("type %T: %w", local, errNoSignedness) + } + } + } + + return zero, ErrNotSupported +} + +func boolToUint64(val bool) uint64 { + if val { + return 1 + } + return 0 +} + +/* coreAccessor contains a path through a struct. It contains at least one index. + * + * The interpretation depends on the kind of the relocation. The following is + * taken from struct bpf_core_relo in libbpf_internal.h: + * + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, strings is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * + * Example to provide a better feel. 
+ * + * struct sample { + * int a; + * struct { + * int b[10]; + * }; + * }; + * + * struct sample s = ...; + * int x = &s->a; // encoded as "0:0" (a is field #0) + * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, + * // b is field #0 inside anon struct, accessing elem #5) + * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) + */ +type coreAccessor []int + +func parseCOREAccessor(accessor string) (coreAccessor, error) { + if accessor == "" { + return nil, fmt.Errorf("empty accessor") + } + + parts := strings.Split(accessor, ":") + result := make(coreAccessor, 0, len(parts)) + for _, part := range parts { + // 31 bits to avoid overflowing int on 32 bit platforms. + index, err := strconv.ParseUint(part, 10, 31) + if err != nil { + return nil, fmt.Errorf("accessor index %q: %s", part, err) + } + + result = append(result, int(index)) + } + + return result, nil +} + +func (ca coreAccessor) String() string { + strs := make([]string, 0, len(ca)) + for _, i := range ca { + strs = append(strs, strconv.Itoa(i)) + } + return strings.Join(strs, ":") +} + +func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { + e, ok := As[*Enum](t) + if !ok { + return nil, fmt.Errorf("not an enum: %s", t) + } + + if len(ca) > 1 { + return nil, fmt.Errorf("invalid accessor %s for enum", ca) + } + + i := ca[0] + if i >= len(e.Values) { + return nil, fmt.Errorf("invalid index %d for %s", i, e) + } + + return &e.Values[i], nil +} + +// coreField represents the position of a "child" of a composite type from the +// start of that type. +// +// /- start of composite +// | offset * 8 | bitfieldOffset | bitfieldSize | ... | +// \- start of field end of field -/ +type coreField struct { + Type Type + + // The position of the field from the start of the composite type in bytes. + offset uint32 + + // The offset of the bitfield in bits from the start of the field. + bitfieldOffset Bits + + // The size of the bitfield in bits. + // + // Zero if the field is not a bitfield. + bitfieldSize Bits +} + +func (cf *coreField) adjustOffsetToNthElement(n int) error { + if n == 0 { + return nil + } + + size, err := Sizeof(cf.Type) + if err != nil { + return err + } + + cf.offset += uint32(n) * uint32(size) + return nil +} + +func (cf *coreField) adjustOffsetBits(offset Bits) error { + align, err := alignof(cf.Type) + if err != nil { + return err + } + + // We can compute the load offset by: + // 1) converting the bit offset to bytes with a flooring division. + // 2) dividing and multiplying that offset by the alignment, yielding the + // load size aligned offset. + offsetBytes := uint32(offset/8) / uint32(align) * uint32(align) + + // The number of bits remaining is the bit offset less the number of bits + // we can "skip" with the aligned offset. + cf.bitfieldOffset = offset - Bits(offsetBytes*8) + + // We know that cf.offset is aligned at to at least align since we get it + // from the compiler via BTF. Adding an aligned offsetBytes preserves the + // alignment. + cf.offset += offsetBytes + return nil +} + +func (cf *coreField) sizeBits() (Bits, error) { + if cf.bitfieldSize > 0 { + return cf.bitfieldSize, nil + } + + // Someone is trying to access a non-bitfield via a bit shift relocation. + // This happens when a field changes from a bitfield to a regular field + // between kernel versions. Synthesise the size to make the shifts work. 
+ size, err := Sizeof(cf.Type) + if err != nil { + return 0, err + } + return Bits(size * 8), nil +} + +// coreFindField descends into the local type using the accessor and tries to +// find an equivalent field in target at each step. +// +// Returns the field and the offset of the field from the start of +// target in bits. +func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { + local := coreField{Type: localT} + target := coreField{Type: targetT} + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) + } + + // The first index is used to offset a pointer of the base type like + // when accessing an array. + if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + var localMaybeFlex, targetMaybeFlex bool + for i, acc := range localAcc[1:] { + switch localType := UnderlyingType(local.Type).(type) { + case composite: + // For composite types acc is used to find the field in the local type, + // and then we try to find a field in target with the same name. + localMembers := localType.members() + if acc >= len(localMembers) { + return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) + } + + localMember := localMembers[acc] + if localMember.Name == "" { + localMemberType, ok := As[composite](localMember.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) + } + + // This is an anonymous struct or union, ignore it. + local = coreField{ + Type: localMemberType, + offset: local.offset + localMember.Offset.Bytes(), + } + localMaybeFlex = false + continue + } + + targetType, ok := As[composite](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) + } + + targetMember, last, err := coreFindMember(targetType, localMember.Name) + if err != nil { + return coreField{}, coreField{}, err + } + + local = coreField{ + Type: localMember.Type, + offset: local.offset, + bitfieldSize: localMember.BitfieldSize, + } + localMaybeFlex = acc == len(localMembers)-1 + + target = coreField{ + Type: targetMember.Type, + offset: target.offset, + bitfieldSize: targetMember.BitfieldSize, + } + targetMaybeFlex = last + + if local.bitfieldSize == 0 && target.bitfieldSize == 0 { + local.offset += localMember.Offset.Bytes() + target.offset += targetMember.Offset.Bytes() + break + } + + // Either of the members is a bitfield. Make sure we're at the + // end of the accessor. + if next := i + 1; next < len(localAcc[1:]) { + return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") + } + + if err := local.adjustOffsetBits(localMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetBits(targetMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + case *Array: + // For arrays, acc is the index in the target. 
+ targetType, ok := As[*Array](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) + } + + if localType.Nelems == 0 && !localMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") + } + if targetType.Nelems == 0 && !targetMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") + } + + if localType.Nelems > 0 && acc >= int(localType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) + } + if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) + } + + local = coreField{ + Type: localType.Type, + offset: local.offset, + } + localMaybeFlex = false + + if err := local.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + target = coreField{ + Type: targetType.Type, + offset: target.offset, + } + targetMaybeFlex = false + + if err := target.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + default: + return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) + } + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, err + } + } + + return local, target, nil +} + +// coreFindMember finds a member in a composite type while handling anonymous +// structs and unions. +func coreFindMember(typ composite, name string) (Member, bool, error) { + if name == "" { + return Member{}, false, errors.New("can't search for anonymous member") + } + + type offsetTarget struct { + composite + offset Bits + } + + targets := []offsetTarget{{typ, 0}} + visited := make(map[composite]bool) + + for i := 0; i < len(targets); i++ { + target := targets[i] + + // Only visit targets once to prevent infinite recursion. + if visited[target] { + continue + } + if len(visited) >= maxResolveDepth { + // This check is different than libbpf, which restricts the entire + // path to BPF_CORE_SPEC_MAX_LEN items. + return Member{}, false, fmt.Errorf("type is nested too deep") + } + visited[target] = true + + members := target.members() + for j, member := range members { + if member.Name == name { + // NB: This is safe because member is a copy. + member.Offset += target.offset + return member, j == len(members)-1, nil + } + + // The names don't match, but this member could be an anonymous struct + // or union. + if member.Name != "" { + continue + } + + comp, ok := As[composite](member.Type) + if !ok { + return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) + } + + targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) + } + } + + return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) +} + +// coreFindEnumValue follows localAcc to find the equivalent enum value in target. 
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { + localValue, err := localAcc.enumValue(local) + if err != nil { + return nil, nil, err + } + + targetEnum, ok := As[*Enum](target) + if !ok { + return nil, nil, errImpossibleRelocation + } + + localName := newEssentialName(localValue.Name) + for i, targetValue := range targetEnum.Values { + if newEssentialName(targetValue.Name) != localName { + continue + } + + return localValue, &targetEnum.Values[i], nil + } + + return nil, nil, errImpossibleRelocation +} + +// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules. +// +// Only layout compatibility is checked, ignoring names of the root type. +func CheckTypeCompatibility(localType Type, targetType Type) error { + return coreAreTypesCompatible(localType, targetType, nil) +} + +type pair struct { + A, B Type +} + +/* The comment below is from bpf_core_types_are_compat in libbpf.c: + * + * Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errIncompatibleTypes if types are not compatible. 
+ */ +func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := localType.(type) { + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: + return nil + + case *Pointer: + tv := targetType.(*Pointer) + return coreAreTypesCompatible(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil { + return err + } + + return coreAreTypesCompatible(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil { + return err + } + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, localParam := range lv.Params { + targetParam := tv.Params[i] + if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil { + return err + } + } + + return nil + + default: + return fmt.Errorf("unsupported type %T", localType) + } +} + +/* coreAreMembersCompatible checks two types for field-based relocation compatibility. + * + * The comment below is from bpf_core_fields_are_compat in libbpf.c: + * + * Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * [ NB: coreAreMembersCompatible doesn't recurse, this check is done + * by coreFindField. ] + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if the members are not compatible. 
+ */ +func coreAreMembersCompatible(localType Type, targetType Type) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + _, lok := localType.(composite) + _, tok := targetType.(composite) + if lok && tok { + return nil + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := localType.(type) { + case *Array, *Pointer, *Float, *Int: + return nil + + case *Enum: + tv := targetType.(*Enum) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + case *Fwd: + tv := targetType.(*Fwd) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + default: + return fmt.Errorf("type %s: %w", localType, ErrNotSupported) + } +} + +// coreEssentialNamesMatch compares two names while ignoring their flavour suffix. +// +// This should only be used on names which are in the global scope, like struct +// names, typedefs or enum values. +func coreEssentialNamesMatch(a, b string) bool { + if a == "" || b == "" { + // allow anonymous and named type to match + return true + } + + return newEssentialName(a) == newEssentialName(b) +} + +/* The comment below is from __bpf_core_types_match in relo_core.c: + * + * Check that two types "match". This function assumes that root types were + * already checked for name match. + * + * The matching relation is defined as follows: + * - modifiers and typedefs are stripped (and, hence, effectively ignored) + * - generally speaking types need to be of same kind (struct vs. struct, union + * vs. union, etc.) + * - exceptions are struct/union behind a pointer which could also match a + * forward declaration of a struct or union, respectively, and enum vs. 
+ * enum64 (see below) + * Then, depending on type: + * - integers: + * - match if size and signedness match + * - arrays & pointers: + * - target types are recursively matched + * - structs & unions: + * - local members need to exist in target with the same name + * - for each member we recursively check match unless it is already behind a + * pointer, in which case we only check matching names and compatible kind + * - enums: + * - local variants have to have a match in target by symbolic name (but not + * numeric value) + * - size has to match (but enum may match enum64 and vice versa) + * - function pointers: + * - number and position of arguments in local type has to match target + * - for each argument and the return value we recursively check match + */ +func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) { + return fmt.Errorf("type name %q don't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes) + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := (localType).(type) { + case *Void: + + case *Fwd: + if targetType.(*Fwd).Kind != lv.Kind { + return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Enum: + return coreEnumsMatch(lv, targetType.(*Enum)) + + case composite: + tv := targetType.(composite) + + if len(lv.members()) > len(tv.members()) { + return errIncompatibleTypes + } + + localMembers := lv.members() + targetMembers := map[string]Member{} + for _, member := range tv.members() { + targetMembers[member.Name] = member + } + + for _, localMember := range localMembers { + targetMember, found := targetMembers[localMember.Name] + if !found { + return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes) + } + + err := coreTypesMatch(localMember.Type, targetMember.Type, visited) + if err != nil { + return err + } + } + + case *Int: + if !coreEncodingMatches(lv, targetType.(*Int)) { + return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Pointer: + tv := targetType.(*Pointer) + + // Allow a pointer to a forward declaration to match a struct + // or union. 
+ if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) { + return nil + } + + if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) { + return nil + } + + return coreTypesMatch(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + + if lv.Nelems != tv.Nelems { + return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + return coreTypesMatch(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, lparam := range lv.Params { + if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil { + return err + } + } + + return coreTypesMatch(lv.Return, tv.Return, visited) + + default: + return fmt.Errorf("unsupported type %T", localType) + } + + return nil +} + +// coreEncodingMatches returns true if both ints have the same size and signedness. +// All encodings other than `Signed` are considered unsigned. +func coreEncodingMatches(local, target *Int) bool { + return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed) +} + +// coreEnumsMatch checks two enums match, which is considered to be the case if the following is true: +// - size has to match (but enum may match enum64 and vice versa) +// - local variants have to have a match in target by symbolic name (but not numeric value) +func coreEnumsMatch(local *Enum, target *Enum) error { + if local.Size != target.Size { + return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes) + } + + // If there are more values in the local than the target, there must be at least one value in the local + // that isn't in the target, and therefor the types are incompatible. + if len(local.Values) > len(target.Values) { + return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes) + } + +outer: + for _, lv := range local.Values { + for _, rv := range target.Values { + if coreEssentialNamesMatch(lv.Name, rv.Name) { + continue outer + } + } + + return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes) + } + + return nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/doc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/doc.go similarity index 71% rename from src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/doc.go rename to src/nvcgo/vendor/github.com/cilium/ebpf/btf/doc.go index ad2576cb2..b1f4b1fc3 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/doc.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/doc.go @@ -2,7 +2,4 @@ // // The canonical documentation lives in the Linux kernel repository and is // available at https://www.kernel.org/doc/html/latest/bpf/btf.html -// -// The API is very much unstable. You should only use this via the main -// ebpf library. package btf diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/ext_info.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/ext_info.go new file mode 100644 index 000000000..6ff5e2b90 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -0,0 +1,832 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// ExtInfos contains ELF section metadata. +type ExtInfos struct { + // The slices are sorted by offset in ascending order. 
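+	// Keys of the maps below are ELF section names.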
+ funcInfos map[string]FuncOffsets + lineInfos map[string]LineOffsets + relocationInfos map[string]CORERelocationInfos +} + +// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. +// +// Returns an error wrapping ErrNotFound if no ext infos are present. +func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, spec) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. + headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string]FuncOffsets, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncOffsets(bfis, spec) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string]LineOffsets, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type ( + funcInfoMeta struct{} + coreRelocationMeta struct{} +) + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos) +} + +// Assign per-instruction metadata to the instructions in insns. 
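+//
+// [ExtInfos.Assign] above is a thin wrapper around this function; for a
+// single section named "prog" (illustrative) it is equivalent to:
+//
+//	AssignMetadataToInstructions(insns,
+//		ei.funcInfos["prog"], ei.lineInfos["prog"], ei.relocationInfos["prog"])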
+func AssignMetadataToInstructions( + insns asm.Instructions, + funcInfos FuncOffsets, + lineInfos LineOffsets, + reloInfos CORERelocationInfos, +) { + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos) > 0 && funcInfos[0].Offset == iter.Offset { + *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].Func) + funcInfos = funcInfos[1:] + } + + if len(lineInfos) > 0 && lineInfos[0].Offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos[0].Line) + lineInfos = lineInfos[1:] + } + + if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo) + reloInfos.infos = reloInfos.infos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +// +// If an instruction has an [asm.Comment], it will be synthesized into a mostly +// empty line info. +func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil { + goto marshal + } + } + + return nil, nil, nil + +marshal: + var fiBuf, liBuf bytes.Buffer + for { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &FuncOffset{ + Func: fn, + Offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, b); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if source := iter.Ins.Source(); source != nil { + var line *Line + if l, ok := source.(*Line); ok { + line = l + } else { + line = &Line{ + line: source.String(), + } + } + + li := &LineOffset{ + Offset: iter.Offset, + Line: line, + } + if err := li.marshal(&liBuf, b); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + + if !iter.Next() { + break + } + } + + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. +func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { + var header btfExtHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") + } + + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. +func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. 
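+//
+// For example, with a HdrLen of 32 and a LineInfoOff of 0x40, the line_info
+// records start at byte offset 0x60 into .BTF.ext.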
+func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. +func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 { + return int64(h.HdrLen + ch.COREReloOff) +} + +// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen +// field is larger than its size. +type btfExtCOREHeader struct { + COREReloOff uint32 + COREReloLen uint32 +} + +// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional +// header bytes are present, extHeader.HdrLen will be larger than the struct, +// indicating the presence of a CO-RE extension header. +func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) { + extHdrSize := int64(binary.Size(&extHeader)) + remainder := int64(extHeader.HdrLen) - extHdrSize + + if remainder == 0 { + return nil, nil + } + + var coreHeader btfExtCOREHeader + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + return &coreHeader, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, +// appearing within func_info and line_info sub-sections. +// These headers appear once for each program section in the ELF and are +// followed by one or more func/line_info records for the section. +func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) { + var infoHeader btfExtInfoSec + if err := binary.Read(r, bo, &infoHeader); err != nil { + return "", nil, fmt.Errorf("read ext info header: %w", err) + } + + secName, err := strings.Lookup(infoHeader.SecNameOff) + if err != nil { + return "", nil, fmt.Errorf("get section name: %w", err) + } + if secName == "" { + return "", nil, fmt.Errorf("extinfo header refers to empty section name") + } + + if infoHeader.NumInfo == 0 { + return "", nil, fmt.Errorf("section %s has zero records", secName) + } + + return secName, &infoHeader, nil +} + +// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos +// or line_infos segment that describes the length of all extInfoRecords in +// that segment. +func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { + const maxRecordSize = 256 + + var recordSize uint32 + if err := binary.Read(r, bo, &recordSize); err != nil { + return 0, fmt.Errorf("can't read record size: %v", err) + } + + if recordSize < 4 { + // Need at least InsnOff worth of bytes per record. + return 0, errors.New("record size too short") + } + if recordSize > maxRecordSize { + return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + } + + return recordSize, nil +} + +// FuncOffsets is a sorted slice of FuncOffset. +type FuncOffsets []FuncOffset + +// The size of a FuncInfo in BTF wire format. +var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) + +// FuncOffset represents a [btf.Func] and its raw instruction offset within a +// BPF program. +type FuncOffset struct { + Offset asm.RawInstructionOffset + Func *Func +} + +type bpfFuncInfo struct { + // Instruction offset of the function within an ELF section. 
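+	// When parsed from an ELF this is a byte offset and is divided by
+	// asm.InstructionSize (8 bytes) as soon as possible, so that byte offset
+	// 16 becomes instruction offset 2. The kernel wire format already counts
+	// in instructions.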
+ InsnOff uint32 + TypeID TypeID +} + +func newFuncOffset(fi bpfFuncInfo, spec *Spec) (*FuncOffset, error) { + typ, err := spec.TypeByID(fi.TypeID) + if err != nil { + return nil, err + } + + fn, ok := typ.(*Func) + if !ok { + return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) + } + + // C doesn't have anonymous functions, but check just in case. + if fn.Name == "" { + return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID) + } + + return &FuncOffset{ + asm.RawInstructionOffset(fi.InsnOff), + fn, + }, nil +} + +func newFuncOffsets(bfis []bpfFuncInfo, spec *Spec) (FuncOffsets, error) { + fos := make(FuncOffsets, 0, len(bfis)) + + for _, bfi := range bfis { + fi, err := newFuncOffset(bfi, spec) + if err != nil { + return FuncOffsets{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) + } + fos = append(fos, *fi) + } + sort.Slice(fos, func(i, j int) bool { + return fos[i].Offset <= fos[j].Offset + }) + return fos, nil +} + +// LoadFuncInfos parses BTF func info from kernel wire format into a +// [FuncOffsets], a sorted slice of [btf.Func]s of (sub)programs within a BPF +// program with their corresponding raw instruction offsets. +func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncOffsets, error) { + fis, err := parseFuncInfoRecords( + reader, + bo, + FuncInfoSize, + recordNum, + false, + ) + if err != nil { + return FuncOffsets{}, fmt.Errorf("parsing BTF func info: %w", err) + } + + return newFuncOffsets(fis, spec) +} + +// marshal into the BTF wire format. +func (fi *FuncOffset) marshal(w *bytes.Buffer, b *Builder) error { + id, err := b.Add(fi.Func) + if err != nil { + return err + } + bfi := bpfFuncInfo{ + InsnOff: uint32(fi.Offset), + TypeID: id, + } + buf := make([]byte, FuncInfoSize) + internal.NativeEndian.PutUint32(buf, bfi.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID)) + _, err = w.Write(buf) + return err +} + +// parseFuncInfos parses a func_info sub-section within .BTF.ext ito a map of +// func infos indexed by section name. +func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfFuncInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. +// These records appear after a btf_ext_info_sec header in the func_info +// sub-section of .BTF.ext. +func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) { + var out []bpfFuncInfo + var fi bpfFuncInfo + + if exp, got := FuncInfoSize, recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. 
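+		// (The check is for exact equality, so shorter records are rejected
+		// as well; FuncInfoSize is the size of bpfFuncInfo, two uint32 fields.)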
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if offsetInBytes { + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + fi.InsnOff /= asm.InstructionSize + } + + out = append(out, fi) + } + + return out, nil +} + +var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) + +// Line represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type Line struct { + fileName string + line string + lineNumber uint32 + lineColumn uint32 +} + +func (li *Line) FileName() string { + return li.fileName +} + +func (li *Line) Line() string { + return li.line +} + +func (li *Line) LineNumber() uint32 { + return li.lineNumber +} + +func (li *Line) LineColumn() uint32 { + return li.lineColumn +} + +func (li *Line) String() string { + return li.line +} + +// LineOffsets contains a sorted list of line infos. +type LineOffsets []LineOffset + +// LineOffset represents a line info and its raw instruction offset. +type LineOffset struct { + Offset asm.RawInstructionOffset + Line *Line +} + +// Constants for the format of bpfLineInfo.LineCol. +const ( + bpfLineShift = 10 + bpfLineMax = (1 << (32 - bpfLineShift)) - 1 + bpfColumnMax = (1 << bpfLineShift) - 1 +) + +type bpfLineInfo struct { + // Instruction offset of the line within the whole instruction stream, in instructions. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +// LoadLineInfos parses BTF line info in kernel wire format. +func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineOffsets, error) { + lis, err := parseLineInfoRecords( + reader, + bo, + LineInfoSize, + recordNum, + false, + ) + if err != nil { + return LineOffsets{}, fmt.Errorf("parsing BTF line info: %w", err) + } + + return newLineInfos(lis, spec.strings) +} + +func newLineInfo(li bpfLineInfo, strings *stringTable) (LineOffset, error) { + line, err := strings.LookupCached(li.LineOff) + if err != nil { + return LineOffset{}, fmt.Errorf("lookup of line: %w", err) + } + + fileName, err := strings.LookupCached(li.FileNameOff) + if err != nil { + return LineOffset{}, fmt.Errorf("lookup of filename: %w", err) + } + + lineNumber := li.LineCol >> bpfLineShift + lineColumn := li.LineCol & bpfColumnMax + + return LineOffset{ + asm.RawInstructionOffset(li.InsnOff), + &Line{ + fileName, + line, + lineNumber, + lineColumn, + }, + }, nil +} + +func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineOffsets, error) { + lis := make([]LineOffset, 0, len(blis)) + for _, bli := range blis { + li, err := newLineInfo(bli, strings) + if err != nil { + return LineOffsets{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) + } + lis = append(lis, li) + } + sort.Slice(lis, func(i, j int) bool { + return lis[i].Offset <= lis[j].Offset + }) + return lis, nil +} + +// marshal writes the binary representation of the LineInfo to w. 
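+//
+// The wire format is four native-endian uint32s: InsnOff, FileNameOff, LineOff
+// and LineCol, where LineCol packs the line number into the upper 22 bits and
+// the column into the lower 10 bits, e.g. line 7, column 3 becomes
+// (7<<10)|3 = 7171.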
+func (li *LineOffset) marshal(w *bytes.Buffer, b *Builder) error { + line := li.Line + if line.lineNumber > bpfLineMax { + return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) + } + + if line.lineColumn > bpfColumnMax { + return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax) + } + + fileNameOff, err := b.addString(line.fileName) + if err != nil { + return fmt.Errorf("file name %q: %w", line.fileName, err) + } + + lineOff, err := b.addString(line.line) + if err != nil { + return fmt.Errorf("line %q: %w", line.line, err) + } + + bli := bpfLineInfo{ + uint32(li.Offset), + fileNameOff, + lineOff, + (line.lineNumber << bpfLineShift) | line.lineColumn, + } + + buf := make([]byte, LineInfoSize) + internal.NativeEndian.PutUint32(buf, bli.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff) + internal.NativeEndian.PutUint32(buf[8:], bli.LineOff) + internal.NativeEndian.PutUint32(buf[12:], bli.LineCol) + _, err = w.Write(buf) + return err +} + +// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of +// line infos indexed by section name. +func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfLineInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseLineInfoRecords parses a stream of line_infos into a lineInfos. +// These records appear after a btf_ext_info_sec header in the line_info +// sub-section of .BTF.ext. +func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) { + if exp, got := uint32(binary.Size(bpfLineInfo{})), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) + } + + out := make([]bpfLineInfo, recordNum) + if err := binary.Read(r, bo, out); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) + } + + if offsetInBytes { + for i := range out { + li := &out[i] + if li.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + li.InsnOff /= asm.InstructionSize + } + } + + return out, nil +} + +// bpfCORERelo matches the kernel's struct bpf_core_relo. +type bpfCORERelo struct { + InsnOff uint32 + TypeID TypeID + AccessStrOff uint32 + Kind coreKind +} + +type CORERelocation struct { + // The local type of the relocation, stripped of typedefs and qualifiers. + typ Type + accessor coreAccessor + kind coreKind + // The ID of the local type in the source BTF. 
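+	// "Local" refers to the BTF of the compiled BPF object the relocation was
+	// emitted against, as opposed to the target BTF (typically the kernel's)
+	// it is later applied to.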
+ id TypeID +} + +func (cr *CORERelocation) String() string { + return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id) +} + +func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { + relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) + return relo +} + +// CORERelocationInfos contains a sorted list of co:re relocation infos. +type CORERelocationInfos struct { + infos []coreRelocationInfo +} + +type coreRelocationInfo struct { + relo *CORERelocation + offset asm.RawInstructionOffset +} + +func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) { + typ, err := spec.TypeByID(relo.TypeID) + if err != nil { + return nil, err + } + + accessorStr, err := strings.Lookup(relo.AccessStrOff) + if err != nil { + return nil, err + } + + accessor, err := parseCOREAccessor(accessorStr) + if err != nil { + return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) + } + + return &coreRelocationInfo{ + &CORERelocation{ + typ, + accessor, + relo.Kind, + relo.TypeID, + }, + asm.RawInstructionOffset(relo.InsnOff), + }, nil +} + +func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) { + rs := CORERelocationInfos{ + infos: make([]coreRelocationInfo, 0, len(brs)), + } + for _, br := range brs { + relo, err := newRelocationInfo(br, spec, strings) + if err != nil { + return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err) + } + rs.infos = append(rs.infos, *relo) + } + sort.Slice(rs.infos, func(i, j int) bool { + return rs.infos[i].offset < rs.infos[j].offset + }) + return rs, nil +} + +var extInfoReloSize = binary.Size(bpfCORERelo{}) + +// parseCORERelos parses a core_relos sub-section within .BTF.ext ito a map of +// CO-RE relocations indexed by section name. +func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + if recordSize != uint32(extInfoReloSize) { + return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) + } + + result := make(map[string][]bpfCORERelo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseCOREReloRecords(r, bo, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseCOREReloRecords parses a stream of CO-RE relocation entries into a +// coreRelos. These records appear after a btf_ext_info_sec header in the +// core_relos sub-section of .BTF.ext. +func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordNum uint32) ([]bpfCORERelo, error) { + var out []bpfCORERelo + + var relo bpfCORERelo + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &relo); err != nil { + return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) + } + + if relo.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. 
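+		// For example, a relocation recorded at byte offset 24 refers to
+		// instruction offset 3 after this division.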
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/feature.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/feature.go new file mode 100644 index 000000000..5b427f5d3 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/feature.go @@ -0,0 +1,158 @@ +package btf + +import ( + "errors" + "math" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// haveBTF attempts to load a BTF blob containing an Int. It should pass on any +// kernel that supports BPF_BTF_LOAD. +var haveBTF = internal.NewFeatureTest("BTF", func() error { + // 0-length anonymous integer + err := probeBTF(&Int{}) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}, "4.18") + +// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is +// used as a proxy for .bss, .data and .rodata map support, which generally +// come with a Var and Datasec. These were introduced in Linux 5.2. +var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", func() error { + if err := haveBTF(); err != nil { + return err + } + + v := &Var{ + Name: "a", + Type: &Pointer{(*Void)(nil)}, + } + + err := probeBTF(v) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: creating the map may still + // succeed without Btf* attrs. + return internal.ErrNotSupported + } + return err +}, "5.2") + +// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It +// is used as a proxy for ext_info (func_info) support, which depends on +// Func(Proto) by definition. +var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", func() error { + if err := haveBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}, "5.0") + +var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", func() error { + if err := haveProgBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + Linkage: GlobalFunc, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.6") + +var haveDeclTags = internal.NewFeatureTest("BTF decl tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &Typedef{ + Name: "a", + Type: &Int{}, + Tags: []string{"a"}, + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.16") + +var haveTypeTags = internal.NewFeatureTest("BTF type tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &TypeTag{ + Type: &Int{}, + Value: "a", + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.17") + +var haveEnum64 = internal.NewFeatureTest("ENUM64", func() error { + if err := haveBTF(); err != nil { + return err + } + + enum := &Enum{ + Size: 8, + Values: []EnumValue{ + {"TEST", math.MaxUint32 + 1}, + }, + } + + err := probeBTF(enum) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "6.0") + +func probeBTF(typ Type) error { + b, err := NewBuilder([]Type{typ}) + if err != nil 
{ + return err + } + + buf, err := b.Marshal(nil, nil) + if err != nil { + return err + } + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.SlicePointer(buf), + BtfSize: uint32(len(buf)), + }) + + if err == nil { + fd.Close() + } + + return err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/format.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/format.go similarity index 66% rename from src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/format.go rename to src/nvcgo/vendor/github.com/cilium/ebpf/btf/format.go index 159319c33..7deca334a 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/format.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/format.go @@ -56,50 +56,48 @@ func (gf *GoFormatter) enumIdentifier(name, element string) string { // // It encodes https://golang.org/ref/spec#Type_declarations: // -// type foo struct { bar uint32; } -// type bar int32 +// type foo struct { _ structs.HostLayout; bar uint32; } +// type bar int32 func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { if name == "" { return fmt.Errorf("need a name for type %s", typ) } - typ, err := skipQualifiers(typ) - if err != nil { + typ = skipQualifiers(typ) + fmt.Fprintf(&gf.w, "type %s ", name) + if err := gf.writeTypeLit(typ, 0); err != nil { return err } - switch v := typ.(type) { - case *Enum: - fmt.Fprintf(&gf.w, "type %s int32", name) - if len(v.Values) == 0 { - return nil - } + e, ok := typ.(*Enum) + if !ok || len(e.Values) == 0 { + return nil + } - gf.w.WriteString("; const ( ") - for _, ev := range v.Values { - id := gf.enumIdentifier(name, ev.Name) - fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, ev.Value) + gf.w.WriteString("; const ( ") + for _, ev := range e.Values { + id := gf.enumIdentifier(name, ev.Name) + var value any + if e.Signed { + value = int64(ev.Value) + } else { + value = ev.Value } - gf.w.WriteString(")") - - return nil + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value) } + gf.w.WriteString(")") - fmt.Fprintf(&gf.w, "type %s ", name) - return gf.writeTypeLit(typ, 0) + return nil } // writeType outputs the name of a named type or a literal describing the type. // // It encodes https://golang.org/ref/spec#Types. // -// foo (if foo is a named type) -// uint32 +// foo (if foo is a named type) +// uint32 func (gf *GoFormatter) writeType(typ Type, depth int) error { - typ, err := skipQualifiers(typ) - if err != nil { - return err - } + typ = skipQualifiers(typ) name := gf.Names[typ] if name != "" { @@ -116,25 +114,35 @@ func (gf *GoFormatter) writeType(typ Type, depth int) error { // // It encodes https://golang.org/ref/spec#TypeLit. 
// -// struct { bar uint32; } -// uint32 +// struct { _ structs.HostLayout; bar uint32; } +// uint32 func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { depth++ - if depth > maxTypeDepth { + if depth > maxResolveDepth { return errNestedTooDeep } - typ, err := skipQualifiers(typ) - if err != nil { - return err - } - - switch v := typ.(type) { + var err error + switch v := skipQualifiers(typ).(type) { case *Int: - gf.writeIntLit(v) + err = gf.writeIntLit(v) case *Enum: - gf.w.WriteString("int32") + if !v.Signed { + gf.w.WriteRune('u') + } + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + err = fmt.Errorf("invalid enum size %d", v.Size) + } case *Typedef: err = gf.writeType(v.Type, depth) @@ -153,8 +161,11 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { case *Datasec: err = gf.writeDatasecLit(v, depth) + case *Var: + err = gf.writeTypeLit(v.Type, depth) + default: - return fmt.Errorf("type %s: %w", typ, ErrNotSupported) + return fmt.Errorf("type %T: %w", v, ErrNotSupported) } if err != nil { @@ -164,23 +175,40 @@ func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { return nil } -func (gf *GoFormatter) writeIntLit(i *Int) { - // NB: Encoding.IsChar is ignored. - if i.Encoding.IsBool() && i.Size == 1 { - gf.w.WriteString("bool") - return - } - +func (gf *GoFormatter) writeIntLit(i *Int) error { bits := i.Size * 8 - if i.Encoding.IsSigned() { - fmt.Fprintf(&gf.w, "int%d", bits) - } else { - fmt.Fprintf(&gf.w, "uint%d", bits) + switch i.Encoding { + case Bool: + if i.Size != 1 { + return fmt.Errorf("bool with size %d", i.Size) + } + gf.w.WriteString("bool") + case Char: + if i.Size != 1 { + return fmt.Errorf("char with size %d", i.Size) + } + // BTF doesn't have a way to specify the signedness of a char. Assume + // we are dealing with unsigned, since this works nicely with []byte + // in Go code. 
+ fallthrough + case Unsigned, Signed: + stem := "uint" + if i.Encoding == Signed { + stem = "int" + } + if i.Size > 8 { + fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8) + } else { + fmt.Fprintf(&gf.w, "%s%d", stem, bits) + } + default: + return fmt.Errorf("can't encode %s", i.Encoding) } + return nil } func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { - gf.w.WriteString("struct { ") + gf.w.WriteString("struct { _ structs.HostLayout; ") prevOffset := uint32(0) skippedBitfield := false @@ -190,18 +218,22 @@ func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) continue } - offset := m.OffsetBits / 8 + offset := m.Offset.Bytes() if n := offset - prevOffset; skippedBitfield && n > 0 { fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) } else { gf.writePadding(n) } - size, err := Sizeof(m.Type) + fieldSize, err := Sizeof(m.Type) if err != nil { return fmt.Errorf("field %d: %w", i, err) } - prevOffset = offset + uint32(size) + + prevOffset = offset + uint32(fieldSize) + if prevOffset > size { + return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size) + } if err := gf.writeStructField(m, depth); err != nil { return fmt.Errorf("field %d: %w", i, err) @@ -217,8 +249,8 @@ func (gf *GoFormatter) writeStructField(m Member, depth int) error { if m.BitfieldSize > 0 { return fmt.Errorf("bitfields are not supported") } - if m.OffsetBits%8 != 0 { - return fmt.Errorf("unsupported offset %d", m.OffsetBits) + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) } if m.Name == "" { @@ -236,7 +268,7 @@ func (gf *GoFormatter) writeStructField(m Member, depth int) error { } depth++ - if depth > maxTypeDepth { + if depth > maxResolveDepth { return errNestedTooDeep } @@ -266,11 +298,15 @@ func (gf *GoFormatter) writeStructField(m Member, depth int) error { } func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { - gf.w.WriteString("struct { ") + gf.w.WriteString("struct { _ structs.HostLayout; ") prevOffset := uint32(0) for i, vsi := range ds.Vars { - v := vsi.Type.(*Var) + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("can't format %s as part of data section", vsi.Type) + } + if v.Linkage != GlobalVar { // Ignore static, extern, etc. for now. continue @@ -302,3 +338,16 @@ func (gf *GoFormatter) writePadding(bytes uint32) { fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) } } + +func skipQualifiers(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/handle.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/handle.go new file mode 100644 index 000000000..89e09a3b8 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/handle.go @@ -0,0 +1,332 @@ +package btf + +import ( + "errors" + "fmt" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 + + needsKernelBase bool +} + +// NewHandle loads the contents of a [Builder] into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. 
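+//
+// A minimal usage sketch (the Int type used here is just an example):
+//
+//	b, err := NewBuilder([]Type{&Int{Name: "u32", Size: 4}})
+//	if err != nil {
+//		return err
+//	}
+//	handle, err := NewHandle(b)
+//	if err != nil {
+//		return err
+//	}
+//	defer handle.Close()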
+func NewHandle(b *Builder) (*Handle, error) { + small := getByteSlice() + defer putByteSlice(small) + + buf, err := b.Marshal(*small, KernelMarshalOptions()) + if err != nil { + return nil, fmt.Errorf("marshal BTF: %w", err) + } + + return NewHandleFromRawBTF(buf) +} + +// NewHandleFromRawBTF loads raw BTF into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandleFromRawBTF(btf []byte) (*Handle, error) { + const minLogSize = 64 * 1024 + + if platform.IsWindows { + return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS) + } + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &sys.BtfLoadAttr{ + Btf: sys.SlicePointer(btf), + BtfSize: uint32(len(btf)), + } + + var ( + logBuf []byte + err error + ) + for { + var fd *sys.FD + fd, err = sys.BtfLoad(attr) + if err == nil { + return &Handle{fd, attr.BtfSize, false}, nil + } + + if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC + // if there are other verification errors. ENOSPC is only returned when + // the BTF blob is correct, a log was requested, and the provided buffer + // is too small. We're therefore not sure whether we got the full + // log or not. + break + } + + // Make an educated guess how large the buffer should be. Start + // at a reasonable minimum and then double the size. + logSize := uint32(max(len(logBuf)*2, minLogSize)) + if int(logSize) < len(logBuf) { + return nil, errors.New("overflow while probing log buffer size") + } + + if attr.BtfLogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. + logSize = attr.BtfLogTrueSize + } + + logBuf = make([]byte, logSize) + attr.BtfLogSize = logSize + attr.BtfLogBuf = sys.SlicePointer(logBuf) + attr.BtfLogLevel = 1 + } + + if err := haveBTF(); err != nil { + return nil, err + } + + return nil, internal.ErrorWithLog("load btf", err, logBuf) +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible. +// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. +func NewHandleFromID(id ID) (*Handle, error) { + if platform.IsWindows { + return nil, fmt.Errorf("btf: handle: %w", internal.ErrNotSupportedOnOS) + } + + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get FD for ID %d: %w", id, err) + } + + info, err := newHandleInfoFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + return &Handle{fd, info.size, info.IsModule()}, nil +} + +// Spec parses the kernel BTF into Go types. +// +// base must contain type information for vmlinux if the handle is for +// a kernel module. It may be nil otherwise. +func (h *Handle) Spec(base *Spec) (*Spec, error) { + var btfInfo sys.BtfInfo + btfBuffer := make([]byte, h.size) + btfInfo.Btf = sys.SlicePointer(btfBuffer) + btfInfo.BtfSize = uint32(len(btfBuffer)) + + if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { + return nil, err + } + + if h.needsKernelBase && base == nil { + return nil, fmt.Errorf("missing base types") + } + + return loadRawSpec(btfBuffer, base) +} + +// Close destroys the handle. 
+// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + if h == nil { + return nil + } + + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + return h.fd.Int() +} + +// Info returns metadata about the handle. +func (h *Handle) Info() (*HandleInfo, error) { + return newHandleInfoFromFD(h.fd) +} + +// HandleInfo describes a Handle. +type HandleInfo struct { + // ID of this handle in the kernel. The ID is only valid as long as the + // associated handle is kept alive. + ID ID + + // Name is an identifying name for the BTF, currently only used by the + // kernel. + Name string + + // IsKernel is true if the BTF originated with the kernel and not + // userspace. + IsKernel bool + + // Size of the raw BTF in bytes. + size uint32 +} + +func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) { + // We invoke the syscall once with a empty BTF and name buffers to get size + // information to allocate buffers. Then we invoke it a second time with + // buffers to receive the data. + var btfInfo sys.BtfInfo + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err) + } + + if btfInfo.NameLen > 0 { + // NameLen doesn't account for the terminating NUL. + btfInfo.NameLen++ + } + + // Don't pull raw BTF by default, since it may be quite large. + btfSize := btfInfo.BtfSize + btfInfo.BtfSize = 0 + + nameBuffer := make([]byte, btfInfo.NameLen) + btfInfo.Name = sys.SlicePointer(nameBuffer) + btfInfo.NameLen = uint32(len(nameBuffer)) + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, err + } + + return &HandleInfo{ + ID: ID(btfInfo.Id), + Name: unix.ByteSliceToString(nameBuffer), + IsKernel: btfInfo.KernelBtf != 0, + size: btfSize, + }, nil +} + +// IsVmlinux returns true if the BTF is for the kernel itself. +func (i *HandleInfo) IsVmlinux() bool { + return i.IsKernel && i.Name == "vmlinux" +} + +// IsModule returns true if the BTF is for a kernel module. +func (i *HandleInfo) IsModule() bool { + return i.IsKernel && i.Name != "vmlinux" +} + +// HandleIterator allows enumerating BTF blobs loaded into the kernel. +type HandleIterator struct { + // The ID of the current handle. Only valid after a call to Next. + ID ID + // The current Handle. Only valid until a call to Next. + // See Take if you want to retain the handle. + Handle *Handle + err error +} + +// Next retrieves a handle for the next BTF object. +// +// Returns true if another BTF object was found. Call [HandleIterator.Err] after +// the function returns false. +func (it *HandleIterator) Next() bool { + if platform.IsWindows { + it.err = fmt.Errorf("btf: %w", internal.ErrNotSupportedOnOS) + return false + } + + id := it.ID + for { + attr := &sys.BtfGetNextIdAttr{Id: id} + err := sys.BtfGetNextId(attr) + if errors.Is(err, os.ErrNotExist) { + // There are no more BTF objects. + break + } else if err != nil { + it.err = fmt.Errorf("get next BTF ID: %w", err) + break + } + + id = attr.NextId + handle, err := NewHandleFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Try again with the next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err) + break + } + + it.Handle.Close() + it.ID, it.Handle = id, handle + return true + } + + // No more handles or we encountered an error. + it.Handle.Close() + it.Handle = nil + return false +} + +// Take the ownership of the current handle. 
+// +// It's the callers responsibility to close the handle. +func (it *HandleIterator) Take() *Handle { + handle := it.Handle + it.Handle = nil + return handle +} + +// Err returns an error if iteration failed for some reason. +func (it *HandleIterator) Err() error { + return it.err +} + +// FindHandle returns the first handle for which predicate returns true. +// +// Requires CAP_SYS_ADMIN. +// +// Returns an error wrapping ErrNotFound if predicate never returns true or if +// there is no BTF loaded into the kernel. +func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) { + it := new(HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, fmt.Errorf("info for ID %d: %w", it.ID, err) + } + + if predicate(info) { + return it.Take(), nil + } + } + if err := it.Err(); err != nil { + return nil, fmt.Errorf("iterate handles: %w", err) + } + + return nil, fmt.Errorf("find handle: %w", ErrNotFound) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/kernel.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/kernel.go new file mode 100644 index 000000000..bb7368bfc --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/kernel.go @@ -0,0 +1,333 @@ +package btf + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "slices" + "sort" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" +) + +// globalCache amortises decoding BTF across all users of the library. +var globalCache = struct { + sync.RWMutex + kernel *Spec + modules map[string]*Spec +}{ + modules: make(map[string]*Spec), +} + +// FlushKernelSpec removes any cached kernel type information. +func FlushKernelSpec() { + globalCache.Lock() + defer globalCache.Unlock() + + globalCache.kernel = nil + globalCache.modules = make(map[string]*Spec) +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. +// +// Consider using [Cache] instead. +func LoadKernelSpec() (*Spec, error) { + spec, err := loadCachedKernelSpec() + return spec.Copy(), err +} + +// load (and cache) the kernel spec. +// +// Does not copy Spec. +func loadCachedKernelSpec() (*Spec, error) { + globalCache.RLock() + spec := globalCache.kernel + globalCache.RUnlock() + + if spec != nil { + return spec, nil + } + + globalCache.Lock() + defer globalCache.Unlock() + + // check again, to prevent race between multiple callers + if globalCache.kernel != nil { + return globalCache.kernel, nil + } + + spec, err := loadKernelSpec() + if err != nil { + return nil, err + } + + globalCache.kernel = spec + return spec, nil +} + +// LoadKernelModuleSpec returns the BTF information for the named kernel module. +// +// Using [Cache.Module] is faster when loading BTF for more than one module. +// +// Defaults to /sys/kernel/btf/. +// Returns an error wrapping ErrNotSupported if BTF is not enabled. +// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist. +func LoadKernelModuleSpec(module string) (*Spec, error) { + spec, err := loadCachedKernelModuleSpec(module) + return spec.Copy(), err +} + +// load (and cache) a module spec. +// +// Does not copy Spec. 
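+// The exported wrapper [LoadKernelModuleSpec] copies the result before
+// returning it to callers, mirroring [LoadKernelSpec].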
+func loadCachedKernelModuleSpec(module string) (*Spec, error) { + globalCache.RLock() + spec := globalCache.modules[module] + globalCache.RUnlock() + + if spec != nil { + return spec, nil + } + + base, err := loadCachedKernelSpec() + if err != nil { + return nil, err + } + + // NB: This only allows a single module to be parsed at a time. Not sure + // it makes a difference. + globalCache.Lock() + defer globalCache.Unlock() + + // check again, to prevent race between multiple callers + if spec := globalCache.modules[module]; spec != nil { + return spec, nil + } + + spec, err = loadKernelModuleSpec(module, base) + if err != nil { + return nil, err + } + + globalCache.modules[module] = spec + return spec, nil +} + +func loadKernelSpec() (*Spec, error) { + if platform.IsWindows { + return nil, internal.ErrNotSupportedOnOS + } + + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + info, err := fh.Stat() + if err != nil { + return nil, fmt.Errorf("stat vmlinux: %w", err) + } + + // NB: It's not safe to mmap arbitrary files because mmap(2) doesn't + // guarantee that changes made after mmap are not visible in the mapping. + // + // This is not a problem for vmlinux, since it is always a read-only file. + raw, err := unix.Mmap(int(fh.Fd()), 0, int(info.Size()), unix.PROT_READ, unix.MAP_PRIVATE) + if err != nil { + return LoadSplitSpecFromReader(fh, nil) + } + + spec, err := loadRawSpec(raw, nil) + if err != nil { + _ = unix.Munmap(raw) + return nil, fmt.Errorf("load vmlinux: %w", err) + } + + runtime.AddCleanup(spec.decoder.sharedBuf, func(b []byte) { + _ = unix.Munmap(b) + }, raw) + + return spec, nil + } + + file, err := findVMLinux() + if err != nil { + return nil, err + } + defer file.Close() + + spec, err := LoadSpecFromReader(file) + return spec, err +} + +func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { + if platform.IsWindows { + return nil, internal.ErrNotSupportedOnOS + } + + dir, file := filepath.Split(module) + if dir != "" || filepath.Ext(file) != "" { + return nil, fmt.Errorf("invalid module name %q", module) + } + + fh, err := os.Open(filepath.Join("/sys/kernel/btf", module)) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSplitSpecFromReader(fh, base) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. +func findVMLinux() (*os.File, error) { + if platform.IsWindows { + return nil, fmt.Errorf("find vmlinux: %w", internal.ErrNotSupportedOnOS) + } + + release, err := linux.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := os.Open(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} + +// Cache allows to amortise the cost of decoding BTF across multiple call-sites. +// +// It is not safe for concurrent use. +type Cache struct { + kernelTypes *Spec + moduleTypes map[string]*Spec + loadedModules []string +} + +// NewCache creates a new Cache. 
+// +// Opportunistically reuses a global cache if possible. +func NewCache() *Cache { + globalCache.RLock() + defer globalCache.RUnlock() + + // This copy is either a no-op or very cheap, since the spec won't contain + // any inflated types. + kernel := globalCache.kernel.Copy() + if kernel == nil { + return &Cache{} + } + + modules := make(map[string]*Spec, len(globalCache.modules)) + for name, spec := range globalCache.modules { + decoder, _ := rebaseDecoder(spec.decoder, kernel.decoder) + // NB: Kernel module BTF can't contain ELF fixups because it is always + // read from sysfs. + modules[name] = &Spec{decoder: decoder} + } + + if len(modules) == 0 { + return &Cache{kernel, nil, nil} + } + + return &Cache{kernel, modules, nil} +} + +// Kernel is equivalent to [LoadKernelSpec], except that repeated calls do +// not copy the Spec. +func (c *Cache) Kernel() (*Spec, error) { + if c.kernelTypes != nil { + return c.kernelTypes, nil + } + + var err error + c.kernelTypes, err = LoadKernelSpec() + return c.kernelTypes, err +} + +// Module is equivalent to [LoadKernelModuleSpec], except that repeated calls do +// not copy the spec. +// +// All modules also share the return value of [Kernel] as their base. +func (c *Cache) Module(name string) (*Spec, error) { + if spec := c.moduleTypes[name]; spec != nil { + return spec, nil + } + + if c.moduleTypes == nil { + c.moduleTypes = make(map[string]*Spec) + } + + base, err := c.Kernel() + if err != nil { + return nil, err + } + + spec, err := loadCachedKernelModuleSpec(name) + if err != nil { + return nil, err + } + + // Important: base is shared between modules. This allows inflating common + // types only once. + decoder, err := rebaseDecoder(spec.decoder, base.decoder) + if err != nil { + return nil, err + } + + spec = &Spec{decoder: decoder} + c.moduleTypes[name] = spec + return spec, err +} + +// Modules returns a sorted list of all loaded modules. +func (c *Cache) Modules() ([]string, error) { + if c.loadedModules != nil { + return c.loadedModules, nil + } + + btfDir, err := os.Open("/sys/kernel/btf") + if err != nil { + return nil, err + } + defer btfDir.Close() + + entries, err := btfDir.Readdirnames(-1) + if err != nil { + return nil, err + } + + entries = slices.DeleteFunc(entries, func(s string) bool { + return s == "vmlinux" + }) + + sort.Strings(entries) + c.loadedModules = entries + return entries, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/marshal.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/marshal.go new file mode 100644 index 000000000..308ce8d34 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -0,0 +1,696 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "maps" + "math" + "slices" + "sync" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +type MarshalOptions struct { + // Target byte order. Defaults to the system's native endianness. + Order binary.ByteOrder + // Remove function linkage information for compatibility with <5.6 kernels. + StripFuncLinkage bool + // Replace decl tags with a placeholder for compatibility with <5.16 kernels. + ReplaceDeclTags bool + // Replace TypeTags with a placeholder for compatibility with <5.17 kernels. + ReplaceTypeTags bool + // Replace Enum64 with a placeholder for compatibility with <6.0 kernels. + ReplaceEnum64 bool + // Prevent the "No type found" error when loading BTF without any types. + PreventNoTypeFound bool +} + +// KernelMarshalOptions will generate BTF suitable for the current kernel. 
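+//
+// Typically passed to [Builder.Marshal] when the encoded BTF is destined for
+// the running kernel, for example:
+//
+//	buf, err := builder.Marshal(nil, KernelMarshalOptions())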
+func KernelMarshalOptions() *MarshalOptions { + return &MarshalOptions{ + Order: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + ReplaceDeclTags: haveDeclTags() != nil, + ReplaceTypeTags: haveTypeTags() != nil, + ReplaceEnum64: haveEnum64() != nil, + PreventNoTypeFound: true, // All current kernels require this. + } +} + +// encoder turns Types into raw BTF. +type encoder struct { + MarshalOptions + + pending internal.Deque[Type] + strings *stringTableBuilder + ids map[Type]TypeID + visited map[Type]struct{} + lastID TypeID +} + +var bufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, btfHeaderLen+128) + return &buf + }, +} + +func getByteSlice() *[]byte { + return bufferPool.Get().(*[]byte) +} + +func putByteSlice(buf *[]byte) { + *buf = (*buf)[:0] + bufferPool.Put(buf) +} + +// Builder turns Types into raw BTF. +// +// The default value may be used and represents an empty BTF blob. Void is +// added implicitly if necessary. +type Builder struct { + // Explicitly added types. + types []Type + // IDs for all added types which the user knows about. + stableIDs map[Type]TypeID + // Explicitly added strings. + strings *stringTableBuilder +} + +// NewBuilder creates a Builder from a list of types. +// +// It is more efficient than calling [Add] individually. +// +// Returns an error if adding any of the types fails. +func NewBuilder(types []Type) (*Builder, error) { + b := &Builder{ + make([]Type, 0, len(types)), + make(map[Type]TypeID, len(types)), + nil, + } + + for _, typ := range types { + _, err := b.Add(typ) + if err != nil { + return nil, fmt.Errorf("add %s: %w", typ, err) + } + } + + return b, nil +} + +// Empty returns true if neither types nor strings have been added. +func (b *Builder) Empty() bool { + return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0) +} + +// Add a Type and allocate a stable ID for it. +// +// Adding the identical Type multiple times is valid and will return the same ID. +// +// See [Type] for details on identity. +func (b *Builder) Add(typ Type) (TypeID, error) { + if b.stableIDs == nil { + b.stableIDs = make(map[Type]TypeID) + } + + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + if ds, ok := typ.(*Datasec); ok { + if err := datasecResolveWorkaround(b, ds); err != nil { + return 0, err + } + } + + id, ok := b.stableIDs[typ] + if ok { + return id, nil + } + + b.types = append(b.types, typ) + + id = TypeID(len(b.types)) + if int(id) != len(b.types) { + return 0, fmt.Errorf("no more type IDs") + } + + b.stableIDs[typ] = id + return id, nil +} + +// Marshal encodes all types in the Marshaler into BTF wire format. +// +// opts may be nil. +func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { + stb := b.strings + if stb == nil { + // Assume that most types are named. This makes encoding large BTF like + // vmlinux a lot cheaper. + stb = newStringTableBuilder(len(b.types)) + } else { + // Avoid modifying the Builder's string table. + stb = b.strings.Copy() + } + + if opts == nil { + opts = &MarshalOptions{Order: internal.NativeEndian} + } + + // Reserve space for the BTF header. 
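+	// It is filled in by binary.Encode at the end of this function, once the
+	// type and string section lengths are known.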
+ buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen] + + e := encoder{ + MarshalOptions: *opts, + strings: stb, + lastID: TypeID(len(b.types)), + visited: make(map[Type]struct{}, len(b.types)), + ids: maps.Clone(b.stableIDs), + } + + if e.ids == nil { + e.ids = make(map[Type]TypeID) + } + + types := b.types + if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound { + // We have strings that need to be written out, + // but no types (besides the implicit Void). + // Kernels as recent as v6.7 refuse to load such BTF + // with a "No type found" error in the log. + // Fix this by adding a dummy type. + types = []Type{&Int{Size: 0}} + } + + // Ensure that types are marshaled in the exact order they were Add()ed. + // Otherwise the ID returned from Add() won't match. + e.pending.Grow(len(types)) + for _, typ := range types { + e.pending.Push(typ) + } + + buf, err := e.deflatePending(buf) + if err != nil { + return nil, err + } + + length := len(buf) + typeLen := uint32(length - btfHeaderLen) + + stringLen := e.strings.Length() + buf = e.strings.AppendEncoded(buf) + + // Fill out the header, and write it out. + header := &btfHeader{ + Magic: btfMagic, + Version: 1, + Flags: 0, + HdrLen: uint32(btfHeaderLen), + TypeOff: 0, + TypeLen: typeLen, + StringOff: typeLen, + StringLen: uint32(stringLen), + } + + _, err = binary.Encode(buf[:btfHeaderLen], e.Order, header) + if err != nil { + return nil, fmt.Errorf("write header: %v", err) + } + + return buf, nil +} + +// addString adds a string to the resulting BTF. +// +// Adding the same string multiple times will return the same result. +// +// Returns an identifier into the string table or an error if the string +// contains invalid characters. +func (b *Builder) addString(str string) (uint32, error) { + if b.strings == nil { + b.strings = newStringTableBuilder(0) + } + + return b.strings.Add(str) +} + +func (e *encoder) allocateIDs(root Type) error { + for typ := range postorder(root, e.visited) { + if _, ok := typ.(*Void); ok { + continue + } + + if _, ok := e.ids[typ]; ok { + continue + } + + id := e.lastID + 1 + if id < e.lastID { + return errors.New("type ID overflow") + } + + e.pending.Push(typ) + e.ids[typ] = id + e.lastID = id + } + + return nil +} + +// id returns the ID for the given type or panics with an error. +func (e *encoder) id(typ Type) TypeID { + if _, ok := typ.(*Void); ok { + return 0 + } + + id, ok := e.ids[typ] + if !ok { + panic(fmt.Errorf("no ID for type %v", typ)) + } + + return id +} + +func (e *encoder) deflatePending(buf []byte) ([]byte, error) { + // Declare root outside of the loop to avoid repeated heap allocations. + var root Type + + for !e.pending.Empty() { + root = e.pending.Shift() + + // Allocate IDs for all children of typ, including transitive dependencies. + err := e.allocateIDs(root) + if err != nil { + return nil, err + } + + buf, err = e.deflateType(buf, root) + if err != nil { + id := e.ids[root] + return nil, fmt.Errorf("deflate %v with ID %d: %w", root, id, err) + } + } + + return buf, nil +} + +func (e *encoder) deflateType(buf []byte, typ Type) (_ []byte, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + panic(r) + } + } + }() + + var raw btfType + raw.NameOff, err = e.strings.Add(typ.TypeName()) + if err != nil { + return nil, err + } + + // Reserve space for the btfType header. + start := len(buf) + buf = append(buf, make([]byte, unsafe.Sizeof(raw))...) 
+ + switch v := typ.(type) { + case *Void: + return nil, errors.New("Void is implicit in BTF wire format") + + case *Int: + buf, err = e.deflateInt(buf, &raw, v) + + case *Pointer: + raw.SetKind(kindPointer) + raw.SetType(e.id(v.Target)) + + case *Array: + raw.SetKind(kindArray) + buf, err = binary.Append(buf, e.Order, &btfArray{ + e.id(v.Type), + e.id(v.Index), + v.Nelems, + }) + + case *Struct: + raw.SetKind(kindStruct) + raw.SetSize(v.Size) + buf, err = e.deflateMembers(buf, &raw, v.Members) + + case *Union: + buf, err = e.deflateUnion(buf, &raw, v) + + case *Enum: + if v.Size == 8 { + buf, err = e.deflateEnum64(buf, &raw, v) + } else { + buf, err = e.deflateEnum(buf, &raw, v) + } + + case *Fwd: + raw.SetKind(kindForward) + raw.SetFwdKind(v.Kind) + + case *Typedef: + raw.SetKind(kindTypedef) + raw.SetType(e.id(v.Type)) + + case *Volatile: + raw.SetKind(kindVolatile) + raw.SetType(e.id(v.Type)) + + case *Const: + e.deflateConst(&raw, v) + + case *Restrict: + raw.SetKind(kindRestrict) + raw.SetType(e.id(v.Type)) + + case *Func: + raw.SetKind(kindFunc) + raw.SetType(e.id(v.Type)) + if !e.StripFuncLinkage { + raw.SetLinkage(v.Linkage) + } + + case *FuncProto: + raw.SetKind(kindFuncProto) + raw.SetType(e.id(v.Return)) + raw.SetVlen(len(v.Params)) + buf, err = e.deflateFuncParams(buf, v.Params) + + case *Var: + raw.SetKind(kindVar) + raw.SetType(e.id(v.Type)) + buf, err = binary.Append(buf, e.Order, btfVariable{uint32(v.Linkage)}) + + case *Datasec: + raw.SetKind(kindDatasec) + raw.SetSize(v.Size) + raw.SetVlen(len(v.Vars)) + buf, err = e.deflateVarSecinfos(buf, v.Vars) + + case *Float: + raw.SetKind(kindFloat) + raw.SetSize(v.Size) + + case *declTag: + buf, err = e.deflateDeclTag(buf, &raw, v) + + case *TypeTag: + err = e.deflateTypeTag(&raw, v) + + default: + return nil, fmt.Errorf("don't know how to deflate %T", v) + } + + if err != nil { + return nil, err + } + + header := buf[start : start+int(unsafe.Sizeof(raw))] + if _, err = raw.Encode(header, e.Order); err != nil { + return nil, err + } + + return buf, nil +} + +func (e *encoder) deflateInt(buf []byte, raw *btfType, i *Int) ([]byte, error) { + raw.SetKind(kindInt) + raw.SetSize(i.Size) + + var bi btfInt + bi.SetEncoding(i.Encoding) + // We need to set bits in addition to size, since btf_type_int_is_regular + // otherwise flags this as a bitfield. + bi.SetBits(byte(i.Size) * 8) + return binary.Append(buf, e.Order, bi) +} + +func (e *encoder) deflateDeclTag(buf []byte, raw *btfType, tag *declTag) ([]byte, error) { + // Replace a decl tag with an integer for compatibility with <5.16 kernels, + // following libbpf behaviour. + if e.ReplaceDeclTags { + typ := &Int{"decl_tag_placeholder", 1, Unsigned} + buf, err := e.deflateInt(buf, raw, typ) + if err != nil { + return nil, err + } + + // Add the placeholder type name to the string table. The encoder added the + // original type name before this call. + raw.NameOff, err = e.strings.Add(typ.TypeName()) + return buf, err + } + + var err error + raw.SetKind(kindDeclTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + if err != nil { + return nil, err + } + + return binary.Append(buf, e.Order, btfDeclTag{uint32(tag.Index)}) +} + +func (e *encoder) deflateConst(raw *btfType, c *Const) { + raw.SetKind(kindConst) + raw.SetType(e.id(c.Type)) +} + +func (e *encoder) deflateTypeTag(raw *btfType, tag *TypeTag) (err error) { + // Replace a type tag with a const qualifier for compatibility with <5.17 + // kernels, following libbpf behaviour. 
+ if e.ReplaceTypeTags { + e.deflateConst(raw, &Const{tag.Type}) + return nil + } + + raw.SetKind(kindTypeTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + return +} + +func (e *encoder) deflateUnion(buf []byte, raw *btfType, union *Union) ([]byte, error) { + raw.SetKind(kindUnion) + raw.SetSize(union.Size) + return e.deflateMembers(buf, raw, union.Members) +} + +func (e *encoder) deflateMembers(buf []byte, header *btfType, members []Member) ([]byte, error) { + var bm btfMember + isBitfield := false + + buf = slices.Grow(buf, len(members)*int(unsafe.Sizeof(bm))) + for _, member := range members { + isBitfield = isBitfield || member.BitfieldSize > 0 + + offset := member.Offset + if isBitfield { + offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff) + } + + nameOff, err := e.strings.Add(member.Name) + if err != nil { + return nil, err + } + + bm = btfMember{ + nameOff, + e.id(member.Type), + uint32(offset), + } + + buf, err = binary.Append(buf, e.Order, &bm) + if err != nil { + return nil, err + } + } + + header.SetVlen(len(members)) + header.SetBitfield(isBitfield) + return buf, nil +} + +func (e *encoder) deflateEnum(buf []byte, raw *btfType, enum *Enum) ([]byte, error) { + raw.SetKind(kindEnum) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + // Signedness appeared together with ENUM64 support. + raw.SetSigned(enum.Signed && !e.ReplaceEnum64) + return e.deflateEnumValues(buf, enum) +} + +func (e *encoder) deflateEnumValues(buf []byte, enum *Enum) ([]byte, error) { + var be btfEnum + buf = slices.Grow(buf, len(enum.Values)*int(unsafe.Sizeof(be))) + for _, value := range enum.Values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + if enum.Signed { + if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name) + } + } else { + if value.Value > math.MaxUint32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name) + } + } + + be = btfEnum{ + nameOff, + uint32(value.Value), + } + + buf, err = binary.Append(buf, e.Order, &be) + if err != nil { + return nil, err + } + } + + return buf, nil +} + +func (e *encoder) deflateEnum64(buf []byte, raw *btfType, enum *Enum) ([]byte, error) { + if e.ReplaceEnum64 { + // Replace the ENUM64 with a union of fields with the correct size. + // This matches libbpf behaviour on purpose. 
+ placeholder := &Int{ + "enum64_placeholder", + enum.Size, + Unsigned, + } + if enum.Signed { + placeholder.Encoding = Signed + } + if err := e.allocateIDs(placeholder); err != nil { + return nil, fmt.Errorf("add enum64 placeholder: %w", err) + } + + members := make([]Member, 0, len(enum.Values)) + for _, v := range enum.Values { + members = append(members, Member{ + Name: v.Name, + Type: placeholder, + }) + } + + return e.deflateUnion(buf, raw, &Union{enum.Name, enum.Size, members, nil}) + } + + raw.SetKind(kindEnum64) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + raw.SetSigned(enum.Signed) + return e.deflateEnum64Values(buf, enum.Values) +} + +func (e *encoder) deflateEnum64Values(buf []byte, values []EnumValue) ([]byte, error) { + var be btfEnum64 + buf = slices.Grow(buf, len(values)*int(unsafe.Sizeof(be))) + for _, value := range values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + be = btfEnum64{ + nameOff, + uint32(value.Value), + uint32(value.Value >> 32), + } + + buf, err = binary.Append(buf, e.Order, &be) + if err != nil { + return nil, err + } + } + + return buf, nil +} + +func (e *encoder) deflateFuncParams(buf []byte, params []FuncParam) ([]byte, error) { + var bp btfParam + buf = slices.Grow(buf, len(params)*int(unsafe.Sizeof(bp))) + for _, param := range params { + nameOff, err := e.strings.Add(param.Name) + if err != nil { + return nil, err + } + + bp = btfParam{ + nameOff, + e.id(param.Type), + } + + buf, err = binary.Append(buf, e.Order, &bp) + if err != nil { + return nil, err + } + } + return buf, nil +} + +func (e *encoder) deflateVarSecinfos(buf []byte, vars []VarSecinfo) ([]byte, error) { + var vsi btfVarSecinfo + var err error + buf = slices.Grow(buf, len(vars)*int(unsafe.Sizeof(vsi))) + for _, v := range vars { + vsi = btfVarSecinfo{ + e.id(v.Type), + v.Offset, + v.Size, + } + + buf, err = binary.Append(buf, e.Order, vsi) + if err != nil { + return nil, err + } + } + return buf, nil +} + +// MarshalMapKV creates a BTF object containing a map key and value. +// +// The function is intended for the use of the ebpf package and may be removed +// at any point in time. +func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) { + var b Builder + + if key != nil { + keyID, err = b.Add(key) + if err != nil { + return nil, 0, 0, fmt.Errorf("add key type: %w", err) + } + } + + if value != nil { + valueID, err = b.Add(value) + if err != nil { + return nil, 0, 0, fmt.Errorf("add value type: %w", err) + } + } + + handle, err := NewHandle(&b) + if err != nil { + // Check for 'full' map BTF support, since kernels between 4.18 and 5.2 + // already support BTF blobs for maps without Var or Datasec just fine. + if err := haveMapBTF(); err != nil { + return nil, 0, 0, err + } + } + return handle, keyID, valueID, err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/strings.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 000000000..482f93bef --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,208 @@ +package btf + +import ( + "bytes" + "errors" + "fmt" + "io" + "maps" + "strings" + "sync" +) + +// stringTable contains a sequence of null-terminated strings. +// +// It is safe for concurrent use. +type stringTable struct { + base *stringTable + bytes []byte + + mu sync.Mutex + cache map[uint32]string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. 
+type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + bytes := make([]byte, r.Size()) + if _, err := io.ReadFull(r, bytes); err != nil { + return nil, err + } + + return newStringTable(bytes, base) +} + +func newStringTable(bytes []byte, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. + firstStringOffset := uint32(0) + if base != nil { + firstStringOffset = uint32(len(base.bytes)) + } + + if len(bytes) > 0 { + if bytes[len(bytes)-1] != 0 { + return nil, errors.New("string table isn't null terminated") + } + + if firstStringOffset == 0 && bytes[0] != 0 { + return nil, errors.New("first item in string table is non-empty") + } + } + + return &stringTable{base: base, bytes: bytes}, nil +} + +func (st *stringTable) Lookup(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return "", nil + } + + b, err := st.lookupSlow(offset) + return string(b), err +} + +func (st *stringTable) LookupBytes(offset uint32) ([]byte, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return nil, nil + } + + return st.lookupSlow(offset) +} + +func (st *stringTable) lookupSlow(offset uint32) ([]byte, error) { + if st.base != nil { + n := uint32(len(st.base.bytes)) + if offset < n { + return st.base.lookupSlow(offset) + } + offset -= n + } + + if offset > uint32(len(st.bytes)) { + return nil, fmt.Errorf("offset %d is out of bounds of string table", offset) + } + + if offset > 0 && st.bytes[offset-1] != 0 { + return nil, fmt.Errorf("offset %d is not the beginning of a string", offset) + } + + i := bytes.IndexByte(st.bytes[offset:], 0) + return st.bytes[offset : offset+uint32(i)], nil +} + +// LookupCache returns the string at the given offset, caching the result +// for future lookups. +func (cst *stringTable) LookupCached(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 { + return "", nil + } + + cst.mu.Lock() + defer cst.mu.Unlock() + + if str, ok := cst.cache[offset]; ok { + return str, nil + } + + str, err := cst.Lookup(offset) + if err != nil { + return "", err + } + + if cst.cache == nil { + cst.cache = make(map[uint32]string) + } + cst.cache[offset] = str + return str, nil +} + +// stringTableBuilder builds BTF string tables. +type stringTableBuilder struct { + length uint32 + strings map[string]uint32 +} + +// newStringTableBuilder creates a builder with the given capacity. +// +// capacity may be zero. +func newStringTableBuilder(capacity int) *stringTableBuilder { + var stb stringTableBuilder + + if capacity == 0 { + // Use the runtime's small default size. + stb.strings = make(map[string]uint32) + } else { + stb.strings = make(map[string]uint32, capacity) + } + + // Ensure that the empty string is at index 0. + stb.append("") + return &stb +} + +// Add a string to the table. +// +// Adding the same string multiple times will only store it once. 
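+//
+// For example (a sketch; concrete offsets depend on insertion order):
+//
+//	stb := newStringTableBuilder(0)
+//	first, _ := stb.Add("example")
+//	second, _ := stb.Add("example")
+//	_ = first == second // true: the string is stored only once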
+func (stb *stringTableBuilder) Add(str string) (uint32, error) { + if strings.IndexByte(str, 0) != -1 { + return 0, fmt.Errorf("string contains null: %q", str) + } + + offset, ok := stb.strings[str] + if ok { + return offset, nil + } + + return stb.append(str), nil +} + +func (stb *stringTableBuilder) append(str string) uint32 { + offset := stb.length + stb.length += uint32(len(str)) + 1 + stb.strings[str] = offset + return offset +} + +// Lookup finds the offset of a string in the table. +// +// Returns an error if str hasn't been added yet. +func (stb *stringTableBuilder) Lookup(str string) (uint32, error) { + offset, ok := stb.strings[str] + if !ok { + return 0, fmt.Errorf("string %q is not in table", str) + } + + return offset, nil +} + +// Length returns the length in bytes. +func (stb *stringTableBuilder) Length() int { + return int(stb.length) +} + +// AppendEncoded appends the string table to the end of the provided buffer. +func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte { + n := len(buf) + buf = append(buf, make([]byte, stb.Length())...) + strings := buf[n:] + for str, offset := range stb.strings { + copy(strings[offset:], str) + } + return buf +} + +// Copy the string table builder. +func (stb *stringTableBuilder) Copy() *stringTableBuilder { + return &stringTableBuilder{ + stb.length, + maps.Clone(stb.strings), + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/traversal.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/traversal.go new file mode 100644 index 000000000..57c1dc27e --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/traversal.go @@ -0,0 +1,159 @@ +package btf + +import ( + "fmt" + "iter" +) + +// Functions to traverse a cyclic graph of types. The below was very useful: +// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order + +// postorder yields all types reachable from root in post order. +func postorder(root Type, visited map[Type]struct{}) iter.Seq[Type] { + return func(yield func(Type) bool) { + visitInPostorder(root, visited, yield) + } +} + +// visitInPostorder is a separate function to avoid arguments escaping +// to the heap. Don't change the setup without re-running the benchmarks. +func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool { + if _, ok := visited[root]; ok { + return true + } + if visited == nil { + visited = make(map[Type]struct{}) + } + visited[root] = struct{}{} + + for child := range children(root) { + if !visitInPostorder(*child, visited, yield) { + return false + } + } + + return yield(root) +} + +// children yields all direct descendants of typ. +func children(typ Type) iter.Seq[*Type] { + return func(yield func(*Type) bool) { + // Explicitly type switch on the most common types to allow the inliner to + // do its work. This avoids allocating intermediate slices from walk() on + // the heap. + var tags []string + switch v := typ.(type) { + case *Void, *Int, *Enum, *Fwd, *Float, *declTag: + // No children to traverse. + // declTags is declared as a leaf type since it's parsed into .Tags fields of other types + // during unmarshaling. 
+ case *Pointer: + if !yield(&v.Target) { + return + } + case *Array: + if !yield(&v.Index) { + return + } + if !yield(&v.Type) { + return + } + case *Struct: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + tags = v.Tags + case *Union: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + tags = v.Tags + case *Typedef: + if !yield(&v.Type) { + return + } + tags = v.Tags + case *Volatile: + if !yield(&v.Type) { + return + } + case *Const: + if !yield(&v.Type) { + return + } + case *Restrict: + if !yield(&v.Type) { + return + } + case *Func: + if !yield(&v.Type) { + return + } + if fp, ok := v.Type.(*FuncProto); ok { + for i := range fp.Params { + if len(v.ParamTags) <= i { + continue + } + for _, t := range v.ParamTags[i] { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return + } + } + } + } + tags = v.Tags + case *FuncProto: + if !yield(&v.Return) { + return + } + for i := range v.Params { + if !yield(&v.Params[i].Type) { + return + } + } + case *Var: + if !yield(&v.Type) { + return + } + tags = v.Tags + case *Datasec: + for i := range v.Vars { + if !yield(&v.Vars[i].Type) { + return + } + } + case *TypeTag: + if !yield(&v.Type) { + return + } + case *cycle: + // cycle has children, but we ignore them deliberately. + default: + panic(fmt.Sprintf("don't know how to walk Type %T", v)) + } + + for _, t := range tags { + var tag Type = &declTag{typ, t, -1} + if !yield(&tag) { + return + } + } + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/types.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 000000000..fc0a59744 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,910 @@ +package btf + +import ( + "errors" + "fmt" + "io" + "math" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// Mirrors MAX_RESOLVE_DEPTH in libbpf. +// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761 +const maxResolveDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID = sys.TypeID + +// Type represents a type described by BTF. +// +// Identity of Type follows the [Go specification]: two Types are considered +// equal if they have the same concrete type and the same dynamic value, aka +// they point at the same location in memory. This means that the following +// Types are considered distinct even though they have the same "shape". +// +// a := &Int{Size: 1} +// b := &Int{Size: 1} +// a != b +// +// [Go specification]: https://go.dev/ref/spec#Comparison_operators +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. + // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. %2v would output details of the root type and its children + // as well as a short description of the grandchildren. 
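+	//
+	// As a rough illustration, fmt.Sprintf("%v", &Int{Name: "u32", Size: 4})
+	// renders along the lines of Int:"u32"[unsigned size=4], whereas %s
+	// stops after the type identity.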
+ fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // New implementations must update walkType. +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) + _ Type = (*declTag)(nil) + _ Type = (*TypeTag)(nil) + _ Type = (*cycle)(nil) +) + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } + +type IntEncoding byte + +// Valid IntEncodings. +// +// These may look like they are flags, but they aren't. +const ( + Unsigned IntEncoding = 0 + Signed IntEncoding = 1 + Char IntEncoding = 2 + Bool IntEncoding = 4 +) + +func (ie IntEncoding) String() string { + switch ie { + case Char: + // NB: There is no way to determine signedness for char. + return "char" + case Bool: + return "bool" + case Signed: + return "signed" + case Unsigned: + return "unsigned" + default: + return fmt.Sprintf("IntEncoding(%d)", byte(ie)) + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. +type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. +type Struct struct { + Name string + // The size of the struct including padding, in bytes + Size uint32 + Members []Member + Tags []string +} + +func (s *Struct) Format(fs fmt.State, verb rune) { + formatType(fs, verb, s, "fields=", len(s.Members)) +} + +func (s *Struct) TypeName() string { return s.Name } + +func (s *Struct) size() uint32 { return s.Size } + +func (s *Struct) copy() Type { + cpy := *s + cpy.Members = copyMembers(s.Members) + cpy.Tags = copyTags(cpy.Tags) + return &cpy +} + +func (s *Struct) members() []Member { + return s.Members +} + +// Union is a compound type where members occupy the same memory. +type Union struct { + Name string + // The size of the union including padding, in bytes. 
+ Size uint32 + Members []Member + Tags []string +} + +func (u *Union) Format(fs fmt.State, verb rune) { + formatType(fs, verb, u, "fields=", len(u.Members)) +} + +func (u *Union) TypeName() string { return u.Name } + +func (u *Union) size() uint32 { return u.Size } + +func (u *Union) copy() Type { + cpy := *u + cpy.Members = copyMembers(u.Members) + cpy.Tags = copyTags(cpy.Tags) + return &cpy +} + +func (u *Union) members() []Member { + return u.Members +} + +func copyMembers(orig []Member) []Member { + cpy := make([]Member, len(orig)) + copy(cpy, orig) + for i, member := range cpy { + cpy[i].Tags = copyTags(member.Tags) + } + return cpy +} + +func copyTags(orig []string) []string { + if orig == nil { // preserve nil vs zero-len slice distinction + return nil + } + cpy := make([]string, len(orig)) + copy(cpy, orig) + return cpy +} + +type composite interface { + Type + members() []Member +} + +var ( + _ composite = (*Struct)(nil) + _ composite = (*Union)(nil) +) + +// A value in bits. +type Bits uint32 + +// Bytes converts a bit value into bytes. +func (b Bits) Bytes() uint32 { + return uint32(b / 8) +} + +// Member is part of a Struct or Union. +// +// It is not a valid Type. +type Member struct { + Name string + Type Type + Offset Bits + BitfieldSize Bits + Tags []string +} + +// Enum lists possible values. +type Enum struct { + Name string + // Size of the enum value in bytes. + Size uint32 + // True if the values should be interpreted as signed integers. + Signed bool + Values []EnumValue +} + +func (e *Enum) Format(fs fmt.State, verb rune) { + formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values)) +} + +func (e *Enum) TypeName() string { return e.Name } + +// EnumValue is part of an Enum +// +// Is is not a valid Type +type EnumValue struct { + Name string + Value uint64 +} + +func (e *Enum) size() uint32 { return e.Size } +func (e *Enum) copy() Type { + cpy := *e + cpy.Values = make([]EnumValue, len(e.Values)) + copy(cpy.Values, e.Values) + return &cpy +} + +// FwdKind is the type of forward declaration. +type FwdKind int + +// Valid types of forward declaration. +const ( + FwdStruct FwdKind = iota + FwdUnion +) + +func (fk FwdKind) String() string { + switch fk { + case FwdStruct: + return "struct" + case FwdUnion: + return "union" + default: + return fmt.Sprintf("%T(%d)", fk, int(fk)) + } +} + +// Fwd is a forward declaration of a Type. +type Fwd struct { + Name string + Kind FwdKind +} + +func (f *Fwd) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Kind) +} + +func (f *Fwd) TypeName() string { return f.Name } + +func (f *Fwd) copy() Type { + cpy := *f + return &cpy +} + +func (f *Fwd) matches(typ Type) bool { + if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct { + return true + } + + if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion { + return true + } + + return false +} + +// Typedef is an alias of a Type. +type Typedef struct { + Name string + Type Type + Tags []string +} + +func (td *Typedef) Format(fs fmt.State, verb rune) { + formatType(fs, verb, td, td.Type) +} + +func (td *Typedef) TypeName() string { return td.Name } + +func (td *Typedef) copy() Type { + cpy := *td + cpy.Tags = copyTags(td.Tags) + return &cpy +} + +// Volatile is a qualifier. 
+type Volatile struct { + Type Type +} + +func (v *Volatile) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Type) +} + +func (v *Volatile) TypeName() string { return "" } + +func (v *Volatile) qualify() Type { return v.Type } +func (v *Volatile) copy() Type { + cpy := *v + return &cpy +} + +// Const is a qualifier. +type Const struct { + Type Type +} + +func (c *Const) Format(fs fmt.State, verb rune) { + formatType(fs, verb, c, c.Type) +} + +func (c *Const) TypeName() string { return "" } + +func (c *Const) qualify() Type { return c.Type } +func (c *Const) copy() Type { + cpy := *c + return &cpy +} + +// Restrict is a qualifier. +type Restrict struct { + Type Type +} + +func (r *Restrict) Format(fs fmt.State, verb rune) { + formatType(fs, verb, r, r.Type) +} + +func (r *Restrict) TypeName() string { return "" } + +func (r *Restrict) qualify() Type { return r.Type } +func (r *Restrict) copy() Type { + cpy := *r + return &cpy +} + +// Func is a function definition. +type Func struct { + Name string + Type Type + Linkage FuncLinkage + Tags []string + // ParamTags holds a list of tags for each parameter of the FuncProto to which `Type` points. + // If no tags are present for any param, the outer slice will be nil/len(ParamTags)==0. + // If at least 1 param has a tag, the outer slice will have the same length as the number of params. + // The inner slice contains the tags and may be nil/len(ParamTags[i])==0 if no tags are present for that param. + ParamTags [][]string +} + +func FuncMetadata(ins *asm.Instruction) *Func { + fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) + return fn +} + +// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction. +func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction { + ins.Metadata.Set(funcInfoMeta{}, fn) + return ins +} + +func (f *Func) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Linkage, "proto=", f.Type) +} + +func (f *Func) TypeName() string { return f.Name } + +func (f *Func) copy() Type { + cpy := *f + cpy.Tags = copyTags(f.Tags) + if f.ParamTags != nil { // preserve nil vs zero-len slice distinction + ptCopy := make([][]string, len(f.ParamTags)) + for i, tags := range f.ParamTags { + ptCopy[i] = copyTags(tags) + } + cpy.ParamTags = ptCopy + } + return &cpy +} + +// FuncProto is a function declaration. +type FuncProto struct { + Return Type + Params []FuncParam +} + +func (fp *FuncProto) Format(fs fmt.State, verb rune) { + formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return) +} + +func (fp *FuncProto) TypeName() string { return "" } + +func (fp *FuncProto) copy() Type { + cpy := *fp + cpy.Params = make([]FuncParam, len(fp.Params)) + copy(cpy.Params, fp.Params) + return &cpy +} + +type FuncParam struct { + Name string + Type Type +} + +// Var is a global variable. +type Var struct { + Name string + Type Type + Linkage VarLinkage + Tags []string +} + +func (v *Var) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Linkage) +} + +func (v *Var) TypeName() string { return v.Name } + +func (v *Var) copy() Type { + cpy := *v + cpy.Tags = copyTags(v.Tags) + return &cpy +} + +// Datasec is a global program section containing data. 
+type Datasec struct { + Name string + Size uint32 + Vars []VarSecinfo +} + +func (ds *Datasec) Format(fs fmt.State, verb rune) { + formatType(fs, verb, ds) +} + +func (ds *Datasec) TypeName() string { return ds.Name } + +func (ds *Datasec) size() uint32 { return ds.Size } + +func (ds *Datasec) copy() Type { + cpy := *ds + cpy.Vars = make([]VarSecinfo, len(ds.Vars)) + copy(cpy.Vars, ds.Vars) + return &cpy +} + +// VarSecinfo describes variable in a Datasec. +// +// It is not a valid Type. +type VarSecinfo struct { + // Var or Func. + Type Type + Offset uint32 + Size uint32 +} + +// Float is a float of a given length. +type Float struct { + Name string + + // The size of the float in bytes. + Size uint32 +} + +func (f *Float) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, "size=", f.Size*8) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + +// declTag associates metadata with a declaration. +type declTag struct { + Type Type + Value string + // The index this tag refers to in the target type. For composite types, + // a value of -1 indicates that the tag refers to the whole type. Otherwise + // it indicates which member or argument the tag applies to. + Index int +} + +func (dt *declTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index) +} + +func (dt *declTag) TypeName() string { return "" } +func (dt *declTag) copy() Type { + cpy := *dt + return &cpy +} + +// TypeTag associates metadata with a pointer type. Tag types act as a custom +// modifier(const, restrict, volatile) for the target type. Unlike declTags, +// TypeTags are ordered so the order in which they are added matters. +// +// One of their uses is to mark pointers as `__kptr` meaning a pointer points +// to kernel memory. Adding a `__kptr` to pointers in map values allows you +// to store pointers to kernel memory in maps. +type TypeTag struct { + Type Type + Value string +} + +func (tt *TypeTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value) +} + +func (tt *TypeTag) TypeName() string { return "" } +func (tt *TypeTag) qualify() Type { return tt.Type } +func (tt *TypeTag) copy() Type { + cpy := *tt + return &cpy +} + +// cycle is a type which had to be elided since it exceeded maxTypeDepth. +type cycle struct { + root Type +} + +func (c *cycle) ID() TypeID { return math.MaxUint32 } +func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) } +func (c *cycle) TypeName() string { return "" } +func (c *cycle) copy() Type { + cpy := *c + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) + _ qualifier = (*TypeTag)(nil) +) + +var errUnsizedType = errors.New("type is unsized") + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. 
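+//
+// For example (sketch), an array of eight 4-byte integers yields 32:
+//
+//	n, _ := Sizeof(&Array{Type: &Int{Size: 4}, Nelems: 8})
+//	_ = n // 32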
+func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxResolveDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// alignof returns the alignment of a type. +// +// Returns an error if the Type can't be aligned, like an integer with an uneven +// size. Currently only supports the subset of types necessary for bitfield +// relocations. +func alignof(typ Type) (int, error) { + var n int + + switch t := UnderlyingType(typ).(type) { + case *Enum: + n = int(t.size()) + case *Int: + n = int(t.Size) + case *Array: + return alignof(t.Type) + default: + return 0, fmt.Errorf("can't calculate alignment of %T", t) + } + + if !internal.IsPow(n) { + return 0, fmt.Errorf("alignment value %d is not a power of two", n) + } + + return n, nil +} + +// Copy a Type recursively. +// +// typ may form a cycle. +func Copy(typ Type) Type { + return copyType(typ, nil, make(map[Type]Type), nil) +} + +func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type { + if typ == nil { + return nil + } + + cpy, ok := copies[typ] + if ok { + // This has been copied previously, no need to continue. + return cpy + } + + cpy = typ.copy() + copies[typ] = cpy + + if id, ok := ids[typ]; ok { + copiedIDs[cpy] = id + } + + for child := range children(cpy) { + *child = copyType(*child, ids, copies, copiedIDs) + } + + return cpy +} + +type typeDeque = internal.Deque[*Type] + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { + if name == "" { + return "" + } + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return essentialName(name[:lastIdx]) + } + return essentialName(name) +} + +// UnderlyingType skips qualifiers and Typedefs. +func UnderlyingType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result + } + } + return &cycle{typ} +} + +// QualifiedType returns the type with all qualifiers removed. +func QualifiedType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} + +// As returns typ if is of type T. 
Otherwise it peels qualifiers and Typedefs +// until it finds a T. +// +// Returns the zero value and false if there is no T or if the type is nested +// too deeply. +func As[T Type](typ Type) (T, bool) { + // NB: We can't make this function return (*T) since then + // we can't assert that a type matches an interface which + // embeds Type: as[composite](T). + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (typ).(type) { + case T: + return v, true + case qualifier: + typ = v.qualify() + case *Typedef: + typ = v.Type + default: + goto notFound + } + } +notFound: + var zero T + return zero, false +} + +type formatState struct { + fmt.State + depth int +} + +// formattableType is a subset of Type, to ease unit testing of formatType. +type formattableType interface { + fmt.Formatter + TypeName() string +} + +// formatType formats a type in a canonical form. +// +// Handles cyclical types by only printing cycles up to a certain depth. Elements +// in extra are separated by spaces unless the preceding element is a string +// ending in '='. +func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) { + if verb != 'v' && verb != 's' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb) + return + } + + _, _ = io.WriteString(f, internal.GoTypeName(t)) + + if name := t.TypeName(); name != "" { + // Output BTF type name if present. + fmt.Fprintf(f, ":%q", name) + } + + if f.Flag('+') { + // Output address if requested. + fmt.Fprintf(f, ":%#p", t) + } + + if verb == 's' { + // %s omits details. + return + } + + var depth int + if ps, ok := f.(*formatState); ok { + depth = ps.depth + f = ps.State + } + + maxDepth, ok := f.Width() + if !ok { + maxDepth = 0 + } + + if depth > maxDepth { + // We've reached the maximum depth. This avoids infinite recursion even + // for cyclical types. + return + } + + if len(extra) == 0 { + return + } + + wantSpace := false + _, _ = io.WriteString(f, "[") + for _, arg := range extra { + if wantSpace { + _, _ = io.WriteString(f, " ") + } + + switch v := arg.(type) { + case string: + _, _ = io.WriteString(f, v) + wantSpace = len(v) > 0 && v[len(v)-1] != '=' + continue + + case formattableType: + v.Format(&formatState{f, depth + 1}, verb) + + default: + fmt.Fprint(f, arg) + } + + wantSpace = true + } + _, _ = io.WriteString(f, "]") +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/unmarshal.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/unmarshal.go new file mode 100644 index 000000000..26ae320d2 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/unmarshal.go @@ -0,0 +1,789 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/maphash" + "io" + "iter" + "maps" + "math" + "slices" + "sync" +) + +// sharedBuf is a buffer which may be shared between multiple decoders. +// +// It must not be modified. Some sharedBuf may be backed by an mmap-ed file, in +// which case the sharedBuf has a finalizer. sharedBuf must therefore always be +// passed as a pointer. +type sharedBuf struct { + raw []byte +} + +type decoder struct { + // Immutable fields, may be shared. + + base *decoder + byteOrder binary.ByteOrder + *sharedBuf + strings *stringTable + // The ID for offsets[0]. + firstTypeID TypeID + // Map from TypeID to offset of the marshaled data in raw. Contains an entry + // for each TypeID, including 0 aka Void. The offset for Void is invalid. + offsets []int + declTags map[TypeID][]TypeID + // An index from essentialName to TypeID. 
+ namedTypes *fuzzyStringIndex + + // Protection for mutable fields below. + mu sync.Mutex + types map[TypeID]Type + typeIDs map[Type]TypeID + legacyBitfields map[TypeID][2]Bits // offset, size +} + +func newDecoder(raw []byte, bo binary.ByteOrder, strings *stringTable, base *decoder) (*decoder, error) { + firstTypeID := TypeID(0) + if base != nil { + if base.byteOrder != bo { + return nil, fmt.Errorf("can't use %v base with %v split BTF", base.byteOrder, bo) + } + + if base.firstTypeID != 0 { + return nil, fmt.Errorf("can't use split BTF as base") + } + + firstTypeID = TypeID(len(base.offsets)) + } + + var header btfType + var numTypes, numDeclTags, numNamedTypes int + + for _, err := range allBtfTypeOffsets(raw, bo, &header) { + if err != nil { + return nil, err + } + + numTypes++ + + if header.Kind() == kindDeclTag { + numDeclTags++ + } + + if header.NameOff != 0 { + numNamedTypes++ + } + } + + if firstTypeID == 0 { + // Allocate an extra slot for Void so we don't have to deal with + // constant off by one issues. + numTypes++ + } + + offsets := make([]int, 0, numTypes) + declTags := make(map[TypeID][]TypeID, numDeclTags) + namedTypes := newFuzzyStringIndex(numNamedTypes) + + if firstTypeID == 0 { + // Add a sentinel for Void. + offsets = append(offsets, math.MaxInt) + } + + id := firstTypeID + TypeID(len(offsets)) + for offset := range allBtfTypeOffsets(raw, bo, &header) { + if id < firstTypeID { + return nil, fmt.Errorf("no more type IDs") + } + + offsets = append(offsets, offset) + + if header.Kind() == kindDeclTag { + declTags[header.Type()] = append(declTags[header.Type()], id) + } + + // Build named type index. + name, err := strings.LookupBytes(header.NameOff) + if err != nil { + return nil, fmt.Errorf("lookup type name for id %v: %w", id, err) + } + + if len(name) > 0 { + if i := bytes.Index(name, []byte("___")); i != -1 { + // Flavours are rare. It's cheaper to find the first index for some + // reason. + i = bytes.LastIndex(name, []byte("___")) + name = name[:i] + } + + namedTypes.Add(name, id) + } + + id++ + } + + namedTypes.Build() + + return &decoder{ + base, + bo, + &sharedBuf{raw}, + strings, + firstTypeID, + offsets, + declTags, + namedTypes, + sync.Mutex{}, + make(map[TypeID]Type), + make(map[Type]TypeID), + make(map[TypeID][2]Bits), + }, nil +} + +func allBtfTypeOffsets(buf []byte, bo binary.ByteOrder, header *btfType) iter.Seq2[int, error] { + return func(yield func(int, error) bool) { + for offset := 0; offset < len(buf); { + start := offset + + n, err := unmarshalBtfType(header, buf[offset:], bo) + if err != nil { + yield(-1, fmt.Errorf("unmarshal type header: %w", err)) + return + } + offset += n + + n, err = header.DataLen() + if err != nil { + yield(-1, err) + return + } + offset += n + + if offset > len(buf) { + yield(-1, fmt.Errorf("auxiliary type data: %w", io.ErrUnexpectedEOF)) + return + } + + if !yield(start, nil) { + return + } + } + } +} + +func rebaseDecoder(d *decoder, base *decoder) (*decoder, error) { + if d.base == nil { + return nil, fmt.Errorf("rebase split spec: not a split spec") + } + + if len(d.base.raw) != len(base.raw) || (len(d.base.raw) > 0 && &d.base.raw[0] != &base.raw[0]) { + return nil, fmt.Errorf("rebase split spec: raw BTF differs") + } + + return &decoder{ + base, + d.byteOrder, + d.sharedBuf, + d.strings, + d.firstTypeID, + d.offsets, + d.declTags, + d.namedTypes, + sync.Mutex{}, + make(map[TypeID]Type), + make(map[Type]TypeID), + make(map[TypeID][2]Bits), + }, nil +} + +// Copy performs a deep copy of a decoder and its base. 
+func (d *decoder) Copy() *decoder { + if d == nil { + return nil + } + + return d.copy(nil) +} + +func (d *decoder) copy(copiedTypes map[Type]Type) *decoder { + if d == nil { + return nil + } + + d.mu.Lock() + defer d.mu.Unlock() + + if copiedTypes == nil { + copiedTypes = make(map[Type]Type, len(d.types)) + } + + types := make(map[TypeID]Type, len(d.types)) + typeIDs := make(map[Type]TypeID, len(d.typeIDs)) + for id, typ := range d.types { + types[id] = copyType(typ, d.typeIDs, copiedTypes, typeIDs) + } + + return &decoder{ + d.base.copy(copiedTypes), + d.byteOrder, + d.sharedBuf, + d.strings, + d.firstTypeID, + d.offsets, + d.declTags, + d.namedTypes, + sync.Mutex{}, + types, + typeIDs, + maps.Clone(d.legacyBitfields), + } +} + +// TypeID returns the ID for a Type previously obtained via [TypeByID]. +func (d *decoder) TypeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + d.mu.Lock() + defer d.mu.Unlock() + + id, ok := d.typeIDs[typ] + if !ok { + return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) + } + + return id, nil +} + +// TypesByName returns all types which have the given essential name. +// +// Returns ErrNotFound if no matching Type exists. +func (d *decoder) TypesByName(name essentialName) ([]Type, error) { + var types []Type + for id := range d.namedTypes.Find(string(name)) { + typ, err := d.TypeByID(id) + if err != nil { + return nil, err + } + + if newEssentialName(typ.TypeName()) == name { + // Deal with hash collisions by checking against the name. + types = append(types, typ) + } + } + + if len(types) == 0 { + // Return an unwrapped error because this is on the hot path + // for CO-RE. + return nil, ErrNotFound + } + + return types, nil +} + +// TypeByID decodes a type and any of its descendants. +func (d *decoder) TypeByID(id TypeID) (Type, error) { + d.mu.Lock() + defer d.mu.Unlock() + + return d.inflateType(id) +} + +func (d *decoder) inflateType(id TypeID) (typ Type, err error) { + defer func() { + if r := recover(); r != nil { + err = r.(error) + } + + // err is the return value of the enclosing function, even if an explicit + // return is used. + // See https://go.dev/ref/spec#Defer_statements + if err != nil { + // Remove partially inflated type so that d.types only contains + // fully inflated ones. + delete(d.types, id) + } else { + // Populate reverse index. + d.typeIDs[typ] = id + } + }() + + if id < d.firstTypeID { + return d.base.inflateType(id) + } + + if id == 0 { + // Void is defined to always be type ID 0, and is thus omitted from BTF. + // Fast-path because it is looked up frequently. 
+ return (*Void)(nil), nil + } + + if typ, ok := d.types[id]; ok { + return typ, nil + } + + fixup := func(id TypeID, typ *Type) { + fixup, err := d.inflateType(id) + if err != nil { + panic(err) + } + *typ = fixup + } + + convertMembers := func(header *btfType, buf []byte) ([]Member, error) { + var bm btfMember + members := make([]Member, 0, header.Vlen()) + for i := range header.Vlen() { + n, err := unmarshalBtfMember(&bm, buf, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("unmarshal member: %w", err) + } + buf = buf[n:] + + name, err := d.strings.Lookup(bm.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(bm.Offset), + }) + + m := &members[i] + fixup(bm.Type, &m.Type) + + if header.Bitfield() { + m.BitfieldSize = Bits(bm.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlat is set + // according to spec. + continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := d.legacyBitfields[bm.Type] + if ok { + // Bingo! + m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + } + return members, nil + } + + idx := int(id - d.firstTypeID) + if idx >= len(d.offsets) { + return nil, fmt.Errorf("type id %v: %w", id, ErrNotFound) + } + + offset := d.offsets[idx] + if offset >= len(d.raw) { + return nil, fmt.Errorf("offset out of bounds") + } + + var ( + header btfType + bInt btfInt + bArr btfArray + bVariable btfVariable + bDeclTag btfDeclTag + pos = d.raw[offset:] + ) + + { + if n, err := unmarshalBtfType(&header, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } else { + pos = pos[n:] + } + + name, err := d.strings.Lookup(header.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch header.Kind() { + case kindInt: + size := header.Size() + if _, err := unmarshalBtfInt(&bInt, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + d.legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} + } + typ = &Int{name, header.Size(), bInt.Encoding()} + d.types[id] = typ + + case kindPointer: + ptr := &Pointer{nil} + d.types[id] = ptr + + fixup(header.Type(), &ptr.Target) + typ = ptr + + case kindArray: + if _, err := unmarshalBtfArray(&bArr, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + d.types[id] = arr + + fixup(bArr.IndexType, &arr.Index) + fixup(bArr.Type, &arr.Type) + typ = arr + + case kindStruct: + str := &Struct{name, header.Size(), nil, nil} + d.types[id] = str + typ = str + + str.Members, err = convertMembers(&header, pos) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + + case kindUnion: + uni := &Union{name, header.Size(), nil, nil} + d.types[id] = uni + typ = uni + + uni.Members, err = convertMembers(&header, pos) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + + case kindEnum: + enum := &Enum{name, header.Size(), header.Signed(), nil} + d.types[id] = enum + typ = enum + + var be btfEnum + enum.Values = make([]EnumValue, 0, header.Vlen()) + for i := range 
header.Vlen() { + n, err := unmarshalBtfEnum(&be, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("unmarshal btfEnum %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(be.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + + value := uint64(be.Val) + if enum.Signed { + // Sign extend values to 64 bit. + value = uint64(int32(be.Val)) + } + enum.Values = append(enum.Values, EnumValue{name, value}) + } + + case kindForward: + typ = &Fwd{name, header.FwdKind()} + d.types[id] = typ + + case kindTypedef: + typedef := &Typedef{name, nil, nil} + d.types[id] = typedef + + fixup(header.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + d.types[id] = volatile + + fixup(header.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + d.types[id] = cnst + + fixup(header.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + d.types[id] = restrict + + fixup(header.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, header.Linkage(), nil, nil} + d.types[id] = fn + + fixup(header.Type(), &fn.Type) + typ = fn + + case kindFuncProto: + fp := &FuncProto{} + d.types[id] = fp + + params := make([]FuncParam, 0, header.Vlen()) + var bParam btfParam + for i := range header.Vlen() { + n, err := unmarshalBtfParam(&bParam, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfParam %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(bParam.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + + param := FuncParam{Name: name} + fixup(bParam.Type, ¶m.Type) + params = append(params, param) + } + + fixup(header.Type(), &fp.Return) + fp.Params = params + typ = fp + + case kindVar: + if _, err := unmarshalBtfVariable(&bVariable, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, VarLinkage(bVariable.Linkage), nil} + d.types[id] = v + + fixup(header.Type(), &v.Type) + typ = v + + case kindDatasec: + ds := &Datasec{name, header.Size(), nil} + d.types[id] = ds + + vlen := header.Vlen() + vars := make([]VarSecinfo, 0, vlen) + var bSecInfo btfVarSecinfo + for i := 0; i < vlen; i++ { + n, err := unmarshalBtfVarSecInfo(&bSecInfo, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecinfo %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + vs := VarSecinfo{ + Offset: bSecInfo.Offset, + Size: bSecInfo.Size, + } + fixup(bSecInfo.Type, &vs.Type) + vars = append(vars, vs) + } + ds.Vars = vars + typ = ds + + case kindFloat: + typ = &Float{name, header.Size()} + d.types[id] = typ + + case kindDeclTag: + if _, err := unmarshalBtfDeclTag(&bDeclTag, pos, d.byteOrder); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx + if uint64(btfIndex) > math.MaxInt { + return nil, fmt.Errorf("type id %d: index exceeds int", id) + } + + dt := &declTag{nil, name, int(int32(btfIndex))} + d.types[id] = dt + + fixup(header.Type(), &dt.Type) + typ = dt + + case kindTypeTag: + tt := &TypeTag{nil, name} + d.types[id] = tt + + fixup(header.Type(), &tt.Type) + typ = tt + + case kindEnum64: + enum := &Enum{name, header.Size(), header.Signed(), nil} + d.types[id] = enum + typ = enum + + enum.Values = make([]EnumValue, 0, header.Vlen()) + 
var bEnum64 btfEnum64 + for i := range header.Vlen() { + n, err := unmarshalBtfEnum64(&bEnum64, pos, d.byteOrder) + if err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64 %d, id: %d: %w", i, id, err) + } + pos = pos[n:] + + name, err := d.strings.Lookup(bEnum64.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) + } + value := (uint64(bEnum64.ValHi32) << 32) | uint64(bEnum64.ValLo32) + enum.Values = append(enum.Values, EnumValue{name, value}) + } + + default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) + } + } + + for _, tagID := range d.declTags[id] { + dtType, err := d.inflateType(tagID) + if err != nil { + return nil, err + } + + dt, ok := dtType.(*declTag) + if !ok { + return nil, fmt.Errorf("type id %v: not a declTag", tagID) + } + + switch t := typ.(type) { + case *Var: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case *Typedef: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case composite: + if dt.Index >= 0 { + members := t.members() + if dt.Index >= len(members) { + return nil, fmt.Errorf("type %s: component idx %d exceeds members of %s", dt, dt.Index, t) + } + + members[dt.Index].Tags = append(members[dt.Index].Tags, dt.Value) + } else if dt.Index == -1 { + switch t2 := t.(type) { + case *Struct: + t2.Tags = append(t2.Tags, dt.Value) + case *Union: + t2.Tags = append(t2.Tags, dt.Value) + } + } else { + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + } + + case *Func: + fp, ok := t.Type.(*FuncProto) + if !ok { + return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) + } + + // Ensure the number of argument tag lists equals the number of arguments + if len(t.ParamTags) == 0 { + t.ParamTags = make([][]string, len(fp.Params)) + } + + if dt.Index >= 0 { + if dt.Index >= len(fp.Params) { + return nil, fmt.Errorf("type %s: component idx %d exceeds params of %s", dt, dt.Index, t) + } + + t.ParamTags[dt.Index] = append(t.ParamTags[dt.Index], dt.Value) + } else if dt.Index == -1 { + t.Tags = append(t.Tags, dt.Value) + } else { + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + } + + default: + return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) + } + } + + return typ, nil +} + +// An index from string to TypeID. +// +// Fuzzy because it may return false positive matches. +type fuzzyStringIndex struct { + seed maphash.Seed + entries []fuzzyStringIndexEntry +} + +func newFuzzyStringIndex(capacity int) *fuzzyStringIndex { + return &fuzzyStringIndex{ + maphash.MakeSeed(), + make([]fuzzyStringIndexEntry, 0, capacity), + } +} + +// Add a string to the index. +// +// Calling the method with identical arguments will create duplicate entries. +func (idx *fuzzyStringIndex) Add(name []byte, id TypeID) { + hash := uint32(maphash.Bytes(idx.seed, name)) + idx.entries = append(idx.entries, newFuzzyStringIndexEntry(hash, id)) +} + +// Build the index. +// +// Must be called after [Add] and before [Match]. +func (idx *fuzzyStringIndex) Build() { + slices.Sort(idx.entries) +} + +// Find TypeIDs which may match the name. +// +// May return false positives, but is guaranteed to not have false negatives. +// +// You must call [Build] at least once before calling this method. 
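+//
+// Typical use (sketch; the name and ID are only examples):
+//
+//	idx := newFuzzyStringIndex(1)
+//	idx.Add([]byte("task_struct"), TypeID(42))
+//	idx.Build()
+//	for id := range idx.Find("task_struct") {
+//		_ = id // candidate TypeID; callers confirm by checking the type's name
+//	}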
+func (idx *fuzzyStringIndex) Find(name string) iter.Seq[TypeID] { + return func(yield func(TypeID) bool) { + hash := uint32(maphash.String(idx.seed, name)) + + // We match only on the first 32 bits here, so ignore found. + i, _ := slices.BinarySearch(idx.entries, fuzzyStringIndexEntry(hash)<<32) + for i := i; i < len(idx.entries); i++ { + if idx.entries[i].hash() != hash { + break + } + + if !yield(idx.entries[i].id()) { + return + } + } + } +} + +// Tuple mapping the hash of an essential name to a type. +// +// Encoded in an uint64 so that it implements cmp.Ordered. +type fuzzyStringIndexEntry uint64 + +func newFuzzyStringIndexEntry(hash uint32, id TypeID) fuzzyStringIndexEntry { + return fuzzyStringIndexEntry(hash)<<32 | fuzzyStringIndexEntry(id) +} + +func (e fuzzyStringIndexEntry) hash() uint32 { + return uint32(e >> 32) +} + +func (e fuzzyStringIndexEntry) id() TypeID { + return TypeID(e) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/btf/workarounds.go b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/workarounds.go new file mode 100644 index 000000000..eb09047fb --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/btf/workarounds.go @@ -0,0 +1,26 @@ +package btf + +// datasecResolveWorkaround ensures that certain vars in a Datasec are added +// to a Spec before the Datasec. This avoids a bug in kernel BTF validation. +// +// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/ +func datasecResolveWorkaround(b *Builder, ds *Datasec) error { + for _, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + continue + } + + switch v.Type.(type) { + case *Typedef, *Volatile, *Const, *Restrict, *TypeTag: + // NB: We must never call Add on a Datasec, otherwise we risk + // infinite recursion. + _, err := b.Add(v.Type) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/collection.go b/src/nvcgo/vendor/github.com/cilium/ebpf/collection.go index 7ba15e128..f99f354d4 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/collection.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/collection.go @@ -4,13 +4,19 @@ import ( "encoding/binary" "errors" "fmt" - "io" - "math" + "path/filepath" "reflect" + "runtime" + "slices" "strings" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/kconfig" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" ) @@ -20,6 +26,17 @@ import ( type CollectionOptions struct { Maps MapOptions Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map } // CollectionSpec describes a collection. @@ -27,6 +44,15 @@ type CollectionSpec struct { Maps map[string]*MapSpec Programs map[string]*ProgramSpec + // Variables refer to global variables declared in the ELF. They can be read + // and modified freely before loading the Collection. 
Modifying them after + // loading has no effect on a running eBPF program. + Variables map[string]*VariableSpec + + // Types holds type information about Maps and Programs. + // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + // ByteOrder specifies whether the ELF was compiled for // big-endian or little-endian architectures. ByteOrder binary.ByteOrder @@ -39,41 +65,57 @@ func (cs *CollectionSpec) Copy() *CollectionSpec { } cpy := CollectionSpec{ - Maps: make(map[string]*MapSpec, len(cs.Maps)), - Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + Maps: copyMapOfSpecs(cs.Maps), + Programs: copyMapOfSpecs(cs.Programs), + Variables: make(map[string]*VariableSpec, len(cs.Variables)), ByteOrder: cs.ByteOrder, + Types: cs.Types.Copy(), } - for name, spec := range cs.Maps { - cpy.Maps[name] = spec.Copy() + for name, spec := range cs.Variables { + cpy.Variables[name] = spec.copy(&cpy) } - - for name, spec := range cs.Programs { - cpy.Programs[name] = spec.Copy() + if cs.Variables == nil { + cpy.Variables = nil } return &cpy } +func copyMapOfSpecs[T interface{ Copy() T }](m map[string]T) map[string]T { + if m == nil { + return nil + } + + cpy := make(map[string]T, len(m)) + for k, v := range m { + cpy[k] = v.Copy() + } + + return cpy +} + // RewriteMaps replaces all references to specific maps. // // Use this function to use pre-existing maps instead of creating new ones // when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. // // Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { for symbol, m := range maps { // have we seen a program that uses this symbol / map seen := false - fd := m.FD() for progName, progSpec := range cs.Programs { - err := progSpec.Instructions.RewriteMapPtr(symbol, fd) + err := progSpec.Instructions.AssociateMap(symbol, m) switch { case err == nil: seen = true - case asm.IsUnreferencedSymbol(err): + case errors.Is(err, asm.ErrUnreferencedSymbol): // Not all programs need to use the map default: @@ -92,12 +134,22 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { return nil } +// MissingConstantsError is returned by [CollectionSpec.RewriteConstants]. +type MissingConstantsError struct { + // The constants missing from .rodata. + Constants []string +} + +func (m *MissingConstantsError) Error() string { + return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", ")) +} + // RewriteConstants replaces the value of multiple constants. // // The constant must be defined like so in the C program: // -// volatile const type foobar; -// volatile const type foobar = default; +// volatile const type foobar; +// volatile const type foobar = default; // // Replacement values must be of the same length as the C sizeof(type). // If necessary, they are marshalled according to the same rules as @@ -105,36 +157,32 @@ func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { // // From Linux 5.5 the verifier will use constants to eliminate dead code. // -// Returns an error if a constant doesn't exist. +// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist. +// +// Deprecated: Use [CollectionSpec.Variables] to interact with constants instead. +// RewriteConstants is now a wrapper around the VariableSpec API. 
func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { - rodata := cs.Maps[".rodata"] - if rodata == nil { - return errors.New("missing .rodata section") - } - - if rodata.BTF == nil { - return errors.New(".rodata section has no BTF") - } + var missing []string + for n, c := range consts { + v, ok := cs.Variables[n] + if !ok { + missing = append(missing, n) + continue + } - if n := len(rodata.Contents); n != 1 { - return fmt.Errorf("expected one key in .rodata, found %d", n) - } + if !v.Constant() { + return fmt.Errorf("variable %s is not a constant", n) + } - kv := rodata.Contents[0] - value, ok := kv.Value.([]byte) - if !ok { - return fmt.Errorf("first value in .rodata is %T not []byte", kv.Value) + if err := v.Set(c); err != nil { + return fmt.Errorf("rewriting constant %s: %w", n, err) + } } - buf := make([]byte, len(value)) - copy(buf, value) - - err := patchValue(buf, rodata.BTF.Value, consts) - if err != nil { - return err + if len(missing) != 0 { + return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing}) } - rodata.Contents[0] = MapKV{kv.Key, buf} return nil } @@ -145,25 +193,23 @@ func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error // if this sounds useful. // // 'to' must be a pointer to a struct. A field of the -// struct is updated with values from Programs or Maps if it -// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec. +// struct is updated with values from Programs, Maps or Variables if it +// has an `ebpf` tag and its type is *ProgramSpec, *MapSpec or *VariableSpec. // The tag's value specifies the name of the program or map as // found in the CollectionSpec. // -// struct { -// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` -// Bar *ebpf.MapSpec `ebpf:"bar_map"` -// Ignored int -// } +// struct { +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Var *ebpf.VariableSpec `ebpf:"some_var"` +// Ignored int +// } // // Returns an error if any of the eBPF objects can't be found, or -// if the same MapSpec or ProgramSpec is assigned multiple times. +// if the same Spec is assigned multiple times. func (cs *CollectionSpec) Assign(to interface{}) error { - // Assign() only supports assigning ProgramSpecs and MapSpecs, - // so doesn't load any resources into the kernel. getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { - case reflect.TypeOf((*ProgramSpec)(nil)): if p := cs.Programs[name]; p != nil { return p, nil @@ -176,6 +222,12 @@ func (cs *CollectionSpec) Assign(to interface{}) error { } return nil, fmt.Errorf("missing map %q", name) + case reflect.TypeOf((*VariableSpec)(nil)): + if v := cs.Variables[name]; v != nil { + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + default: return nil, fmt.Errorf("unsupported type %s", typ) } @@ -187,6 +239,9 @@ func (cs *CollectionSpec) Assign(to interface{}) error { // LoadAndAssign loads Maps and Programs into the kernel and assigns them // to a struct. // +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +// // This function is a shortcut to manually checking the presence // of maps and programs in a CollectionSpec. Consider using bpf2go // if this sounds useful. @@ -198,32 +253,43 @@ func (cs *CollectionSpec) Assign(to interface{}) error { // dependent resources are loaded into the kernel and populated with values if // specified. 
// -// struct { -// Foo *ebpf.Program `ebpf:"xdp_foo"` -// Bar *ebpf.Map `ebpf:"bar_map"` -// Ignored int -// } +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } // // opts may be nil. // // Returns an error if any of the fields can't be found, or // if the same Map or Program is assigned multiple times. func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { - loader := newCollectionLoader(cs, opts) - defer loader.cleanup() + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err + } + defer loader.close() // Support assigning Programs and Maps, lazy-loading the required objects. assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) + getValue := func(typ reflect.Type, name string) (interface{}, error) { switch typ { case reflect.TypeOf((*Program)(nil)): + assignedProgs[name] = true return loader.loadProgram(name) case reflect.TypeOf((*Map)(nil)): assignedMaps[name] = true return loader.loadMap(name) + case reflect.TypeOf((*Variable)(nil)): + assignedVars[name] = true + return loader.loadVariable(name) + default: return nil, fmt.Errorf("unsupported type %s", typ) } @@ -235,14 +301,13 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) } // Populate the requested maps. Has a chance of lazy-loading other dependent maps. - if err := loader.populateMaps(); err != nil { + if err := loader.populateDeferredMaps(); err != nil { return err } // Evaluate the loader's objects after all (lazy)loading has taken place. for n, m := range loader.maps { - switch m.typ { - case ProgramArray: + if m.typ.canStoreProgram() { // Require all lazy-loaded ProgramArrays to be assigned to the given object. // The kernel empties a ProgramArray once the last user space reference // to it closes, which leads to failed tail calls. Combined with the library @@ -257,27 +322,51 @@ func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) } } - loader.finalize() + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } + for p := range assignedVars { + delete(loader.vars, p) + } return nil } -// Collection is a collection of Programs and Maps associated -// with their symbols +// Collection is a collection of live BPF resources present in the kernel. type Collection struct { Programs map[string]*Program Maps map[string]*Map + + // Variables contains global variables used by the Collection's program(s). On + // kernels older than 5.5, most interactions with Variables return + // [ErrNotSupported]. + Variables map[string]*Variable } -// NewCollection creates a Collection from a specification. +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func NewCollection(spec *CollectionSpec) (*Collection, error) { return NewCollectionWithOptions(spec, CollectionOptions{}) } -// NewCollectionWithOptions creates a Collection from a specification. +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. 
+// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { - loader := newCollectionLoader(spec, &opts) - defer loader.cleanup() + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() // Create maps first, as their fds need to be linked into programs. for mapName := range spec.Maps { @@ -296,101 +385,91 @@ func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Co } } + for varName := range spec.Variables { + if _, err := loader.loadVariable(varName); err != nil { + return nil, err + } + } + // Maps can contain Program and Map stubs, so populate them after // all Maps and Programs have been successfully loaded. - if err := loader.populateMaps(); err != nil { + if err := loader.populateDeferredMaps(); err != nil { return nil, err } - maps, progs := loader.maps, loader.programs - - loader.finalize() + // Prevent loader.cleanup from closing maps, programs and vars. + maps, progs, vars := loader.maps, loader.programs, loader.vars + loader.maps, loader.programs, loader.vars = nil, nil, nil return &Collection{ progs, maps, + vars, }, nil } -type handleCache struct { - btfHandles map[*btf.Spec]*btf.Handle - btfSpecs map[io.ReaderAt]*btf.Spec -} - -func newHandleCache() *handleCache { - return &handleCache{ - btfHandles: make(map[*btf.Spec]*btf.Handle), - btfSpecs: make(map[io.ReaderAt]*btf.Spec), - } -} - -func (hc handleCache) btfHandle(spec *btf.Spec) (*btf.Handle, error) { - if hc.btfHandles[spec] != nil { - return hc.btfHandles[spec], nil - } - - handle, err := btf.NewHandle(spec) - if err != nil { - return nil, err - } - - hc.btfHandles[spec] = handle - return handle, nil -} - -func (hc handleCache) btfSpec(rd io.ReaderAt) (*btf.Spec, error) { - if hc.btfSpecs[rd] != nil { - return hc.btfSpecs[rd], nil - } - - spec, err := btf.LoadSpecFromReader(rd) - if err != nil { - return nil, err - } - - hc.btfSpecs[rd] = spec - return spec, nil -} - -func (hc handleCache) close() { - for _, handle := range hc.btfHandles { - handle.Close() - } -} - type collectionLoader struct { coll *CollectionSpec opts *CollectionOptions maps map[string]*Map programs map[string]*Program - handles *handleCache + vars map[string]*Variable + types *btf.Cache } -func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) *collectionLoader { +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { if opts == nil { opts = &CollectionOptions{} } + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. + for name := range opts.MapReplacements { + if _, ok := coll.Maps[name]; !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) + } + } + + if err := populateKallsyms(coll.Programs); err != nil { + return nil, fmt.Errorf("populating kallsyms caches: %w", err) + } + return &collectionLoader{ coll, opts, make(map[string]*Map), make(map[string]*Program), - newHandleCache(), - } + make(map[string]*Variable), + btf.NewCache(), + }, nil } -// finalize should be called when all the collectionLoader's resources -// have been successfully loaded into the kernel and populated with values. 
-func (cl *collectionLoader) finalize() { - cl.maps, cl.programs = nil, nil +// populateKallsyms populates kallsyms caches, making lookups cheaper later on +// during individual program loading. Since we have less context available +// at those stages, we batch the lookups here instead to avoid redundant work. +func populateKallsyms(progs map[string]*ProgramSpec) error { + // Look up addresses of all kernel symbols referenced by all programs. + addrs := make(map[string]uint64) + for _, p := range progs { + iter := p.Instructions.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta != nil { + addrs[meta.Name] = 0 + } + } + } + if len(addrs) != 0 { + if err := kallsyms.AssignAddresses(addrs); err != nil { + return fmt.Errorf("getting addresses from kallsyms: %w", err) + } + } + + return nil } -// cleanup cleans up all resources left over in the collectionLoader. -// Call finalize() when Map and Program creation/population is successful -// to prevent them from getting closed. -func (cl *collectionLoader) cleanup() { - cl.handles.close() +// close all resources left over in the collectionLoader. +func (cl *collectionLoader) close() { for _, m := range cl.maps { m.Close() } @@ -409,11 +488,47 @@ func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { return nil, fmt.Errorf("missing map %s", mapName) } - m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.handles) + mapSpec = mapSpec.Copy() + + // Defer setting the mmapable flag on maps until load time. This avoids the + // MapSpec having different flags on some kernel versions. Also avoid running + // syscalls during ELF loading, so platforms like wasm can also parse an ELF. + if isDataSection(mapSpec.Name) && haveMmapableMaps() == nil { + mapSpec.Flags |= sys.BPF_F_MMAPABLE + } + + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Check compatibility with the replacement map after setting + // feature-dependent map flags. + if err := mapSpec.Compatible(replaceMap); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", mapSpec.Name, err) + } + + // Clone the map to avoid closing user's map later on. + m, err := replaceMap.Clone() + if err != nil { + return nil, err + } + + cl.maps[mapName] = m + return m, nil + } + + m, err := newMapWithOptions(mapSpec, cl.opts.Maps, cl.types) if err != nil { return nil, fmt.Errorf("map %s: %w", mapName, err) } + // Finalize 'scalar' maps that don't refer to any other eBPF resources + // potentially pending creation. This is needed for frozen maps like .rodata + // that need to be finalized before invoking the verifier. + if !mapSpec.Type.canStoreMapOrProgram() { + if err := m.finalize(mapSpec); err != nil { + _ = m.Close() + return nil, fmt.Errorf("finalizing map %s: %w", mapName, err) + } + } + cl.maps[mapName] = m return m, nil } @@ -441,69 +556,148 @@ func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { for i := range progSpec.Instructions { ins := &progSpec.Instructions[i] - if !ins.IsLoadFromMap() || ins.Reference == "" { + if !ins.IsLoadFromMap() || ins.Reference() == "" { continue } - if uint32(ins.Constant) != math.MaxUint32 { - // Don't overwrite maps already rewritten, users can - // rewrite programs in the spec themselves + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. 
+ if int32(ins.Constant) > 0 { continue } - m, err := cl.loadMap(ins.Reference) + m, err := cl.loadMap(ins.Reference()) if err != nil { return nil, fmt.Errorf("program %s: %w", progName, err) } - fd := m.FD() - if fd < 0 { - return nil, fmt.Errorf("map %s: %w", ins.Reference, sys.ErrClosedFd) - } - if err := ins.RewriteMapPtr(m.FD()); err != nil { - return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference, err) + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) } } - prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.handles) + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs, cl.types) if err != nil { return nil, fmt.Errorf("program %s: %w", progName, err) } cl.programs[progName] = prog + return prog, nil } -func (cl *collectionLoader) populateMaps() error { +func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { + if v := cl.vars[varName]; v != nil { + return v, nil + } + + varSpec := cl.coll.Variables[varName] + if varSpec == nil { + return nil, fmt.Errorf("unknown variable %s", varName) + } + + // Get the key of the VariableSpec's MapSpec in the CollectionSpec. + var mapName string + for n, ms := range cl.coll.Maps { + if ms == varSpec.m { + mapName = n + break + } + } + if mapName == "" { + return nil, fmt.Errorf("variable %s: underlying MapSpec %s was removed from CollectionSpec", varName, varSpec.m.Name) + } + + m, err := cl.loadMap(mapName) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + // If the kernel is too old or the underlying map was created without + // BPF_F_MMAPABLE, [Map.Memory] will return ErrNotSupported. In this case, + // emit a Variable with a nil Memory. This keeps Collection{Spec}.Variables + // consistent across systems with different feature sets without breaking + // LoadAndAssign. + var mm *Memory + if unsafeMemory { + mm, err = m.unsafeMemory() + } else { + mm, err = m.Memory() + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, mapName, err) + } + + v, err := newVariable( + varSpec.name, + varSpec.offset, + varSpec.size, + varSpec.t, + mm, + ) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + cl.vars[varName] = v + return v, nil +} + +// populateDeferredMaps iterates maps holding programs or other maps and loads +// any dependencies. Populates all maps in cl and freezes them if specified. +func (cl *collectionLoader) populateDeferredMaps() error { for mapName, m := range cl.maps { mapSpec, ok := cl.coll.Maps[mapName] if !ok { return fmt.Errorf("missing map spec %s", mapName) } + // Scalar maps without Map or Program references are finalized during + // creation. Don't finalize them again. + if !mapSpec.Type.canStoreMapOrProgram() { + continue + } + mapSpec = mapSpec.Copy() - // Replace any object stubs with loaded objects. + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. 
for i, kv := range mapSpec.Contents { - switch v := kv.Value.(type) { - case programStub: + objName, ok := kv.Value.(string) + if !ok { + continue + } + + switch t := mapSpec.Type; { + case t.canStoreProgram(): // loadProgram is idempotent and could return an existing Program. - prog, err := cl.loadProgram(string(v)) + prog, err := cl.loadProgram(objName) if err != nil { - return fmt.Errorf("loading program %s, for map %s: %w", v, mapName, err) + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) } mapSpec.Contents[i] = MapKV{kv.Key, prog} - case mapStub: + case t.canStoreMap(): // loadMap is idempotent and could return an existing Map. - innerMap, err := cl.loadMap(string(v)) + innerMap, err := cl.loadMap(objName) if err != nil { - return fmt.Errorf("loading inner map %s, for map %s: %w", v, mapName, err) + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) } mapSpec.Contents[i] = MapKV{kv.Key, innerMap} } } + if mapSpec.Type == StructOpsMap { + // populate StructOps data into `kernVData` + if err := cl.populateStructOps(m, mapSpec); err != nil { + return err + } + } + // Populate and freeze the map if specified. if err := m.finalize(mapSpec); err != nil { return fmt.Errorf("populating map %s: %w", mapName, err) @@ -513,8 +707,208 @@ func (cl *collectionLoader) populateMaps() error { return nil } -// LoadCollection parses an object file and converts it to a collection. +// populateStructOps translates the user struct bytes into the kernel value struct +// layout for a struct_ops map and writes the result back to mapSpec.Contents[0]. +func (cl *collectionLoader) populateStructOps(m *Map, mapSpec *MapSpec) error { + userType, ok := btf.As[*btf.Struct](mapSpec.Value) + if !ok { + return fmt.Errorf("value should be a *Struct") + } + + userData, ok := mapSpec.Contents[0].Value.([]byte) + if !ok { + return fmt.Errorf("value should be an array of byte") + } + if len(userData) < int(userType.Size) { + return fmt.Errorf("user data too short: have %d, need at least %d", len(userData), userType.Size) + } + + vType, _, module, err := structOpsFindTarget(userType, cl.types) + if err != nil { + return fmt.Errorf("struct_ops value type %q: %w", userType.Name, err) + } + defer module.Close() + + // Find the inner ops struct embedded in the value struct. + kType, kTypeOff, err := structOpsFindInnerType(vType) + if err != nil { + return err + } + + kernVData := make([]byte, int(vType.Size)) + for _, m := range userType.Members { + i := slices.IndexFunc(kType.Members, func(km btf.Member) bool { + return km.Name == m.Name + }) + + // Allow field to not exist in target as long as the source is zero. + if i == -1 { + mSize, err := btf.Sizeof(m.Type) + if err != nil { + return fmt.Errorf("sizeof(user.%s): %w", m.Name, err) + } + srcOff := int(m.Offset.Bytes()) + if srcOff < 0 || srcOff+mSize > len(userData) { + return fmt.Errorf("member %q: userdata is too small", m.Name) + } + + // let fail if the field in type user type is missing in type kern type + if !structOpsIsMemZeroed(userData[srcOff : srcOff+mSize]) { + return fmt.Errorf("%s doesn't exist in %s, but it has non-zero value", m.Name, kType.Name) + } + + continue + } + + km := kType.Members[i] + + switch btf.UnderlyingType(m.Type).(type) { + case *btf.Pointer: + // If this is a pointer → resolve struct_ops program. 
+ psKey := kType.Name + ":" + m.Name + for k, ps := range cl.coll.Programs { + if ps.AttachTo == psKey { + p, ok := cl.programs[k] + if !ok || p == nil { + return nil + } + if err := structOpsPopulateValue(km, kernVData[kTypeOff:], p); err != nil { + return err + } + } + } + + default: + // Otherwise → memcpy the field contents. + if err := structOpsCopyMember(m, km, userData, kernVData[kTypeOff:]); err != nil { + return fmt.Errorf("field %s: %w", kType.Name, err) + } + } + } + + // Populate the map explicitly and keep a reference on cl.programs. + // This is necessary because we may inline fds into kernVData which + // may become invalid if the GC frees them. + if err := m.Put(uint32(0), kernVData); err != nil { + return err + } + mapSpec.Contents = nil + runtime.KeepAlive(cl.programs) + + return nil +} + +// resolveKconfig resolves all variables declared in .kconfig and populates +// m.Contents. Does nothing if the given m.Contents is non-empty. +func resolveKconfig(m *MapSpec) error { + ds, ok := m.Value.(*btf.Datasec) + if !ok { + return errors.New("map value is not a Datasec") + } + + if platform.IsWindows { + return fmt.Errorf(".kconfig: %w", internal.ErrNotSupportedOnOS) + } + + type configInfo struct { + offset uint32 + size uint32 + typ btf.Type + } + + configs := make(map[string]configInfo) + + data := make([]byte, ds.Size) + for _, vsi := range ds.Vars { + v := vsi.Type.(*btf.Var) + n := v.TypeName() + + switch n { + case "LINUX_KERNEL_VERSION": + if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 { + return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) + } + + kv, err := linux.KernelVersion() + if err != nil { + return fmt.Errorf("getting kernel version: %w", err) + } + internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel()) + + case "LINUX_HAS_SYSCALL_WRAPPER": + integer, ok := v.Type.(*btf.Int) + if !ok { + return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type) + } + var value uint64 = 1 + if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) { + value = 0 + } else if err != nil { + return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil { + return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + default: // Catch CONFIG_*. + configs[n] = configInfo{ + offset: vsi.Offset, + size: vsi.Size, + typ: v.Type, + } + } + } + + // We only parse kconfig file if a CONFIG_* variable was found. + if len(configs) > 0 { + f, err := linux.FindKConfig() + if err != nil { + return fmt.Errorf("cannot find a kconfig file: %w", err) + } + defer f.Close() + + filter := make(map[string]struct{}, len(configs)) + for config := range configs { + filter[config] = struct{}{} + } + + kernelConfig, err := kconfig.Parse(f, filter) + if err != nil { + return fmt.Errorf("cannot parse kconfig file: %w", err) + } + + for n, info := range configs { + value, ok := kernelConfig[n] + if !ok { + return fmt.Errorf("config option %q does not exist on this kernel", n) + } + + err := kconfig.PutValue(data[info.offset:info.offset+info.size], info.typ, value) + if err != nil { + return fmt.Errorf("problem adding value for %s: %w", n, err) + } + } + } + + m.Contents = []MapKV{{uint32(0), data}} + + return nil +} + +// LoadCollection reads an object file and creates and loads its declared +// resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. 
+//
+// See the package documentation for details around Map and Program lifecycle.
 func LoadCollection(file string) (*Collection, error) {
+	if platform.IsWindows {
+		// This mirrors a check in efW.
+		if ext := filepath.Ext(file); ext == ".sys" {
+			return loadCollectionFromNativeImage(file)
+		}
+	}
+
 	spec, err := LoadCollectionSpec(file)
 	if err != nil {
 		return nil, err
@@ -522,6 +916,82 @@
 	return NewCollection(spec)
 }
 
+// Assign the contents of a Collection to a struct.
+//
+// This function bridges functionality between bpf2go generated
+// code and any functionality better implemented in Collection.
+//
+// 'to' must be a pointer to a struct. A field of the
+// struct is updated with values from Programs or Maps if it
+// has an `ebpf` tag and its type is *Program or *Map.
+// The tag's value specifies the name of the program or map as
+// found in the CollectionSpec.
+//
+//	struct {
+//	    Foo *ebpf.Program `ebpf:"xdp_foo"`
+//	    Bar *ebpf.Map     `ebpf:"bar_map"`
+//	    Ignored int
+//	}
+//
+// Returns an error if any of the eBPF objects can't be found, or
+// if the same Map or Program is assigned multiple times.
+//
+// Ownership and Close()ing responsibility is transferred to `to`
+// for any successful assigns. On error `to` is left in an undefined state.
+func (coll *Collection) Assign(to interface{}) error {
+	assignedMaps := make(map[string]bool)
+	assignedProgs := make(map[string]bool)
+	assignedVars := make(map[string]bool)
+
+	// Assign() only transfers already-loaded Maps and Programs. No extra
+	// loading is done.
+	getValue := func(typ reflect.Type, name string) (interface{}, error) {
+		switch typ {
+
+		case reflect.TypeOf((*Program)(nil)):
+			if p := coll.Programs[name]; p != nil {
+				assignedProgs[name] = true
+				return p, nil
+			}
+			return nil, fmt.Errorf("missing program %q", name)
+
+		case reflect.TypeOf((*Map)(nil)):
+			if m := coll.Maps[name]; m != nil {
+				assignedMaps[name] = true
+				return m, nil
+			}
+			return nil, fmt.Errorf("missing map %q", name)
+
+		case reflect.TypeOf((*Variable)(nil)):
+			if v := coll.Variables[name]; v != nil {
+				assignedVars[name] = true
+				return v, nil
+			}
+			return nil, fmt.Errorf("missing variable %q", name)
+
+		default:
+			return nil, fmt.Errorf("unsupported type %s", typ)
+		}
+	}
+
+	if err := assignValues(to, getValue); err != nil {
+		return err
+	}
+
+	// Finalize ownership transfer
+	for p := range assignedProgs {
+		delete(coll.Programs, p)
+	}
+	for m := range assignedMaps {
+		delete(coll.Maps, m)
+	}
+	for s := range assignedVars {
+		delete(coll.Variables, s)
+	}
+
+	return nil
+}
+
 // Close frees all maps and programs associated with the collection.
 //
 // The collection mustn't be used afterwards.
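The collection.go hunks above deprecate RewriteMaps and RewriteConstants in favour of CollectionOptions.MapReplacements and CollectionSpec.Variables. A minimal consumer-side sketch of how code depending on this vendored copy could migrate after the bump; the object path, the "debug_level" constant and the "events" map are illustrative placeholders, not names taken from this diff:

package example

import (
	"fmt"

	"github.com/cilium/ebpf"
)

// loadWithReplacements sketches the v0.20-era flow: set a global through its
// VariableSpec and reuse an already-created Map via MapReplacements instead
// of calling the deprecated RewriteConstants/RewriteMaps helpers.
func loadWithReplacements(existingEvents *ebpf.Map) (*ebpf.Collection, error) {
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o") // hypothetical object file
	if err != nil {
		return nil, err
	}

	// Constants now surface as VariableSpecs; Set writes the value into the
	// backing data section before the collection is loaded.
	if v, ok := spec.Variables["debug_level"]; ok {
		if err := v.Set(uint32(1)); err != nil {
			return nil, fmt.Errorf("setting debug_level: %w", err)
		}
	}

	// MapReplacements clones the supplied Map, so the caller keeps ownership
	// of existingEvents and may Close it independently of the Collection.
	return ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
		MapReplacements: map[string]*ebpf.Map{"events": existingEvents},
	})
}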
diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/collection_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/collection_other.go new file mode 100644 index 000000000..0e69bb83a --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/collection_other.go @@ -0,0 +1,9 @@ +//go:build !windows + +package ebpf + +import "github.com/cilium/ebpf/internal" + +func loadCollectionFromNativeImage(_ string) (*Collection, error) { + return nil, internal.ErrNotSupportedOnOS +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/collection_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/collection_windows.go new file mode 100644 index 000000000..c1bbaa21d --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/collection_windows.go @@ -0,0 +1,136 @@ +package ebpf + +import ( + "errors" + "fmt" + "unsafe" + + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +func loadCollectionFromNativeImage(file string) (_ *Collection, err error) { + mapFds := make([]efw.FD, 16) + programFds := make([]efw.FD, 16) + var maps map[string]*Map + var programs map[string]*Program + + defer func() { + if err == nil { + return + } + + for _, fd := range append(mapFds, programFds...) { + // efW never uses fd 0. + if fd != 0 { + _ = efw.EbpfCloseFd(int(fd)) + } + } + + for _, m := range maps { + _ = m.Close() + } + + for _, p := range programs { + _ = p.Close() + } + }() + + nMaps, nPrograms, err := efw.EbpfObjectLoadNativeFds(file, mapFds, programFds) + if errors.Is(err, efw.EBPF_NO_MEMORY) && (nMaps > len(mapFds) || nPrograms > len(programFds)) { + mapFds = make([]efw.FD, nMaps) + programFds = make([]efw.FD, nPrograms) + + nMaps, nPrograms, err = efw.EbpfObjectLoadNativeFds(file, mapFds, programFds) + } + if err != nil { + return nil, err + } + + mapFds = mapFds[:nMaps] + programFds = programFds[:nPrograms] + + // The maximum length of a name is only 16 bytes on Linux, longer names + // are truncated. This is not a problem when loading from an ELF, since + // we get the full object name from the symbol table. + // When loading a native image we do not have this luxury. Use an efW native + // API to retrieve up to 64 bytes of the object name. 
+ + maps = make(map[string]*Map, len(mapFds)) + for _, raw := range mapFds { + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + m, mapErr := newMapFromFD(fd) + if mapErr != nil { + _ = fd.Close() + return nil, mapErr + } + + var efwMapInfo efw.BpfMapInfo + size := uint32(unsafe.Sizeof(efwMapInfo)) + _, err = efw.EbpfObjectGetInfoByFd(m.FD(), unsafe.Pointer(&efwMapInfo), &size) + if err != nil { + _ = m.Close() + return nil, err + } + + if size >= uint32(unsafe.Offsetof(efwMapInfo.Name)+unsafe.Sizeof(efwMapInfo.Name)) { + m.name = unix.ByteSliceToString(efwMapInfo.Name[:]) + } + + if m.name == "" { + _ = m.Close() + return nil, fmt.Errorf("unnamed map") + } + + if _, ok := maps[m.name]; ok { + return nil, fmt.Errorf("duplicate map with the same name: %s", m.name) + } + + maps[m.name] = m + } + + programs = make(map[string]*Program, len(programFds)) + for _, raw := range programFds { + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + program, err := newProgramFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + var efwProgInfo efw.BpfProgInfo + size := uint32(unsafe.Sizeof(efwProgInfo)) + _, err = efw.EbpfObjectGetInfoByFd(program.FD(), unsafe.Pointer(&efwProgInfo), &size) + if err != nil { + _ = program.Close() + return nil, err + } + + if size >= uint32(unsafe.Offsetof(efwProgInfo.Name)+unsafe.Sizeof(efwProgInfo.Name)) { + program.name = unix.ByteSliceToString(efwProgInfo.Name[:]) + } + + if program.name == "" { + _ = program.Close() + return nil, fmt.Errorf("unnamed program") + } + + if _, ok := programs[program.name]; ok { + _ = program.Close() + return nil, fmt.Errorf("duplicate program with the same name: %s", program.name) + } + + programs[program.name] = program + } + + return &Collection{programs, maps, nil}, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/cpu.go b/src/nvcgo/vendor/github.com/cilium/ebpf/cpu.go new file mode 100644 index 000000000..3bcdc386d --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/cpu.go @@ -0,0 +1,17 @@ +package ebpf + +// PossibleCPU returns the max number of CPUs a system may possibly have +// Logical CPU numbers must be of the form 0-n +func PossibleCPU() (int, error) { + return possibleCPU() +} + +// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if +// the error is non-nil. 
+func MustPossibleCPU() int {
+	cpus, err := PossibleCPU()
+	if err != nil {
+		panic(err)
+	}
+	return cpus
+}
diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/cpu_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/cpu_other.go
new file mode 100644
index 000000000..eca5164c1
--- /dev/null
+++ b/src/nvcgo/vendor/github.com/cilium/ebpf/cpu_other.go
@@ -0,0 +1,13 @@
+//go:build !windows
+
+package ebpf
+
+import (
+	"sync"
+
+	"github.com/cilium/ebpf/internal/linux"
+)
+
+var possibleCPU = sync.OnceValues(func() (int, error) {
+	return linux.ParseCPUsFromFile("/sys/devices/system/cpu/possible")
+})
diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/cpu_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/cpu_windows.go
new file mode 100644
index 000000000..9448b0916
--- /dev/null
+++ b/src/nvcgo/vendor/github.com/cilium/ebpf/cpu_windows.go
@@ -0,0 +1,11 @@
+package ebpf
+
+import (
+	"sync"
+
+	"golang.org/x/sys/windows"
+)
+
+var possibleCPU = sync.OnceValues(func() (int, error) {
+	return int(windows.GetMaximumProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil
+})
diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/doc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/doc.go
index f7f34da8f..396b3394d 100644
--- a/src/nvcgo/vendor/github.com/cilium/ebpf/doc.go
+++ b/src/nvcgo/vendor/github.com/cilium/ebpf/doc.go
@@ -13,4 +13,13 @@
 // your application as any other resource.
 //
 // Use the link subpackage to attach a loaded program to a hook in the kernel.
+//
+// Note that losing all references to Map and Program resources will cause
+// their underlying file descriptors to be closed, potentially removing those
+// objects from the kernel. Always retain a reference by e.g. deferring a
+// Close() of a Collection or LoadAndAssign object until application exit.
+//
+// Special care needs to be taken when handling maps of type ProgramArray,
+// as the kernel erases its contents when the last userspace or bpffs
+// reference disappears, regardless of the map being in active use.
 package ebpf
diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/elf_reader.go b/src/nvcgo/vendor/github.com/cilium/ebpf/elf_reader.go
index ac4496b26..f2c9196b7 100644
--- a/src/nvcgo/vendor/github.com/cilium/ebpf/elf_reader.go
+++ b/src/nvcgo/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -10,14 +10,37 @@ import (
 	"io"
 	"math"
 	"os"
+	"slices"
 	"strings"
 
 	"github.com/cilium/ebpf/asm"
+	"github.com/cilium/ebpf/btf"
 	"github.com/cilium/ebpf/internal"
-	"github.com/cilium/ebpf/internal/btf"
-	"github.com/cilium/ebpf/internal/unix"
+	"github.com/cilium/ebpf/internal/platform"
+	"github.com/cilium/ebpf/internal/sys"
 )
 
+type kconfigMetaKey struct{}
+
+type kconfigMeta struct {
+	Map    *MapSpec
+	Offset uint32
+}
+
+type kfuncMetaKey struct{}
+
+type kfuncMeta struct {
+	Binding elf.SymBind
+	Func    *btf.Func
+}
+
+type ksymMetaKey struct{}
+
+type ksymMeta struct {
+	Binding elf.SymBind
+	Name    string
+}
+
 // elfCode is a convenience to reduce the amount of arguments that have to
 // be passed around explicitly. You should treat its contents as immutable.
 type elfCode struct {
@@ -26,6 +49,12 @@ type elfCode struct {
 	license string
 	version uint32
 	btf     *btf.Spec
+	extInfo *btf.ExtInfos
+	maps    map[string]*MapSpec
+	vars    map[string]*VariableSpec
+	kfuncs  map[string]*btf.Func
+	ksyms   map[string]struct{}
+	kconfig *MapSpec
 }
 
 // LoadCollectionSpec parses an ELF file into a CollectionSpec.
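The doc.go hunk above spells out the Map/Program lifecycle rules, and cpu.go now exposes PossibleCPU/MustPossibleCPU. A hedged sketch of what that guidance looks like for a caller of this vendored copy; the struct field tags, program and map names, and the "bpf_prog.o" path are illustrative assumptions, not names from this diff:

package example

import (
	"log"

	"github.com/cilium/ebpf"
)

// bpfObjects follows the lifecycle note added to doc.go: keep references to
// loaded objects for the lifetime of the application and Close them only on
// shutdown. The tag names are placeholders.
type bpfObjects struct {
	Ingress   *ebpf.Program `ebpf:"handle_ingress"`
	JumpTable *ebpf.Map     `ebpf:"jump_table"` // ProgramArray: must stay referenced
}

func (o *bpfObjects) Close() error {
	if err := o.Ingress.Close(); err != nil {
		return err
	}
	return o.JumpTable.Close()
}

func run() error {
	spec, err := ebpf.LoadCollectionSpec("bpf_prog.o")
	if err != nil {
		return err
	}

	var objs bpfObjects
	if err := spec.LoadAndAssign(&objs, nil); err != nil {
		return err
	}
	// Closing only at shutdown matters for the ProgramArray above: the kernel
	// empties it once the last user space reference disappears.
	defer objs.Close()

	// Sizing per-CPU buffers is a typical use for the PossibleCPU helpers.
	log.Printf("possible CPUs: %d", ebpf.MustPossibleCPU())

	// ... attach objs.Ingress via the link package and block until shutdown ...
	return nil
}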
@@ -49,7 +78,12 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { if err != nil { return nil, err } - defer f.Close() + + // Checks if the ELF file is for BPF data. + // Old LLVM versions set e_machine to EM_NONE. + if f.Machine != elf.EM_NONE && f.Machine != elf.EM_BPF { + return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.Machine) + } var ( licenseSection *elf.Section @@ -63,6 +97,8 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { // Collect all the sections we're interested in. This includes relocations // which we parse later. + // + // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date. for i, sec := range f.Sections { idx := elf.SectionIndex(i) @@ -75,7 +111,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { sections[idx] = newElfSection(sec, mapSection) case sec.Name == ".maps": sections[idx] = newElfSection(sec, btfMapSection) - case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"): + case isDataSection(sec.Name): sections[idx] = newElfSection(sec, dataSection) case sec.Type == elf.SHT_REL: // Store relocations under the section index of the target @@ -95,7 +131,7 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { return nil, fmt.Errorf("load version: %w", err) } - btfSpec, err := btf.LoadSpecFromReader(rd) + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) if err != nil && !errors.Is(err, btf.ErrNotFound) { return nil, fmt.Errorf("load BTF: %w", err) } @@ -106,6 +142,11 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { license: license, version: version, btf: btfSpec, + extInfo: btfExtInfo, + maps: make(map[string]*MapSpec), + vars: make(map[string]*VariableSpec), + kfuncs: make(map[string]*btf.Func), + ksyms: make(map[string]struct{}), } symbols, err := f.Symbols() @@ -115,56 +156,43 @@ func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { ec.assignSymbols(symbols) - // Go through relocation sections, and parse the ones for sections we're - // interested in. Make sure that relocations point at valid sections. - for idx, relSection := range relSections { - section := sections[idx] - if section == nil { - continue - } - - rels, err := ec.loadRelocations(relSection, symbols) - if err != nil { - return nil, fmt.Errorf("relocation for section %q: %w", section.Name, err) - } - - for _, rel := range rels { - target := sections[rel.Section] - if target == nil { - return nil, fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) - } - - if target.Flags&elf.SHF_STRINGS > 0 { - return nil, fmt.Errorf("section %q: string is not stack allocated: %w", section.Name, ErrNotSupported) - } - - target.references++ - } - - section.relocations = rels + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) } - // Collect all the various ways to define maps. 
- maps := make(map[string]*MapSpec) - if err := ec.loadMaps(maps); err != nil { + if err := ec.loadMaps(); err != nil { return nil, fmt.Errorf("load maps: %w", err) } - if err := ec.loadBTFMaps(maps); err != nil { + if err := ec.loadBTFMaps(); err != nil { return nil, fmt.Errorf("load BTF maps: %w", err) } - if err := ec.loadDataSections(maps); err != nil { + if err := ec.loadDataSections(); err != nil { return nil, fmt.Errorf("load data sections: %w", err) } + if err := ec.loadKconfigSection(); err != nil { + return nil, fmt.Errorf("load virtual .kconfig section: %w", err) + } + + if err := ec.loadKsymsSection(); err != nil { + return nil, fmt.Errorf("load virtual .ksyms section: %w", err) + } + // Finally, collect programs and link them. progs, err := ec.loadProgramSections() if err != nil { return nil, fmt.Errorf("load programs: %w", err) } - return &CollectionSpec{maps, progs, ec.ByteOrder}, nil + return &CollectionSpec{ + ec.maps, + progs, + ec.vars, + btfSpec, + ec.ByteOrder, + }, nil } func loadLicense(sec *elf.Section) (string, error) { @@ -191,6 +219,18 @@ func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { return version, nil } +func isDataSection(name string) bool { + return name == ".bss" || strings.HasPrefix(name, ".data") || strings.HasPrefix(name, ".rodata") +} + +func isConstantDataSection(name string) bool { + return strings.HasPrefix(name, ".rodata") +} + +func isKconfigSection(name string) bool { + return name == ".kconfig" +} + type elfSectionKind int const ( @@ -265,6 +305,35 @@ func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { } } +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. +func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + // loadProgramSections iterates ec's sections and emits a ProgramSpec // for each function it finds. // @@ -274,6 +343,7 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { progs := make(map[string]*ProgramSpec) // Generate a ProgramSpec for each function found in each program section. + var export []string for _, sec := range ec.sections { if sec.kind != programSection { continue @@ -304,25 +374,19 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { ByteOrder: ec.ByteOrder, } - if ec.btf != nil { - spec.BTF, err = ec.btf.Program(name) - if err != nil && !errors.Is(err, btf.ErrNoExtendedInfo) { - return nil, fmt.Errorf("program %s: %w", name, err) - } - } - // Function names must be unique within a single ELF blob. if progs[name] != nil { return nil, fmt.Errorf("duplicate program name %s", name) } progs[name] = spec + + if spec.SectionName != ".text" { + export = append(export, name) + } } } - // Populate each prog's references with pointers to all of its callees. 
- if err := populateReferences(progs); err != nil { - return nil, fmt.Errorf("populating references: %w", err) - } + flattenPrograms(progs, export) // Hide programs (e.g. library functions) that were not explicitly emitted // to an ELF section. These could be exposed in a separate CollectionSpec @@ -342,73 +406,73 @@ func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { // // The resulting map is indexed by function name. func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { - var ( - r = bufio.NewReader(section.Open()) - funcs = make(map[string]asm.Instructions) - offset uint64 - insns asm.Instructions - ) - for { - ins := asm.Instruction{ - // Symbols denote the first instruction of a function body. - Symbol: section.symbols[offset].Name, - } + r := bufio.NewReader(section.Open()) - // Pull one instruction from the instruction stream. - n, err := ins.Unmarshal(r, ec.ByteOrder) - if errors.Is(err, io.EOF) { - fn := insns.Name() - if fn == "" { - return nil, errors.New("reached EOF before finding a valid symbol") - } + // Decode the section's instruction stream. + insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) + insns, err := asm.AppendInstructions(insns, r, ec.ByteOrder, platform.Linux) + if err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } - // Reached the end of the section and the decoded instruction buffer - // contains at least one valid instruction belonging to a function. - // Store the result and stop processing instructions. - funcs[fn] = insns - break - } - if err != nil { - return nil, fmt.Errorf("offset %d: %w", offset, err) - } + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() - // Decoded the first instruction of a function body but insns already - // holds a valid instruction stream. Store the result and flush insns. - if ins.Symbol != "" && insns.Name() != "" { - funcs[insns.Name()] = insns - insns = nil + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) } + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. if rel, ok := section.relocations[offset]; ok { - // A relocation was found for the current offset. Apply it to the insn. - if err = ec.relocateInstruction(&ins, rel); err != nil { - return nil, fmt.Errorf("offset %d: relocate instruction: %w", offset, err) + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) } } else { - // Up to LLVM 9, calls to subprograms within the same ELF section are - // sometimes encoded using relative jumps without relocation entries. - // If, after all relocations entries have been processed, there are - // still relative pseudocalls left, they must point to an existing - // symbol within the section. - // When splitting sections into subprograms, the targets of these calls - // are no longer in scope, so they must be resolved here. 
- if ins.IsFunctionReference() && ins.Constant != -1 { - tgt := jumpTarget(offset, ins) - sym := section.symbols[tgt].Name - if sym == "" { - return nil, fmt.Errorf("offset %d: no jump target found at offset %d", offset, tgt) - } - - ins.Reference = sym - ins.Constant = -1 + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) } } + } - insns = append(insns, ins) - offset += n + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) } - return funcs, nil + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. +func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) + } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil } // jumpTarget takes ins' offset within an instruction stream (in bytes) @@ -429,6 +493,8 @@ func jumpTarget(offset uint64, ins asm.Instruction) uint64 { return uint64(dest) } +var errUnsupportedBinding = errors.New("unsupported binding") + func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { var ( typ = elf.ST_TYPE(rel.Info) @@ -440,10 +506,14 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err switch target.kind { case mapSection, btfMapSection: - if bind != elf.STB_GLOBAL { + if bind == elf.STB_LOCAL { return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) } + if bind != elf.STB_GLOBAL { + return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind) + } + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { // STT_NOTYPE is generated on clang < 8 which doesn't tag // relocations appropriately. @@ -452,18 +522,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Src = asm.PseudoMapFD - // Mark the instruction as needing an update when creating the - // collection. - if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - case dataSection: var offset uint32 switch typ { case elf.STT_SECTION: if bind != elf.STB_LOCAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) } // This is really a reference to a static symbol, which clang doesn't @@ -472,8 +536,17 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err offset = uint32(uint64(ins.Constant)) case elf.STT_OBJECT: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("direct load: %s: unsupported relocation %s", name, bind) + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. 
+ if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL && bind != elf.STB_WEAK { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) } offset = uint32(rel.Value) @@ -491,12 +564,6 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err ins.Constant = int64(uint64(offset) << 32) ins.Src = asm.PseudoMapValue - // Mark the instruction as needing an update when creating the - // collection. - if err := ins.RewriteMapPtr(-1); err != nil { - return err - } - case programSection: switch opCode := ins.OpCode; { case opCode.JumpOp() == asm.Call: @@ -507,12 +574,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err switch typ { case elf.STT_NOTYPE, elf.STT_FUNC: if bind != elf.STB_GLOBAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) } case elf.STT_SECTION: if bind != elf.STB_LOCAL { - return fmt.Errorf("call: %s: unsupported binding: %s", name, bind) + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) } // The function we want to call is in the indicated section, @@ -535,12 +602,12 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err switch typ { case elf.STT_FUNC: if bind != elf.STB_GLOBAL { - return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) } case elf.STT_SECTION: if bind != elf.STB_LOCAL { - return fmt.Errorf("load: %s: unsupported binding: %s", name, bind) + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) } // ins.Constant already contains the offset in bytes from the @@ -564,26 +631,86 @@ func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) err return fmt.Errorf("neither a call nor a load instruction: %v", ins) } + // The Undefined section is used for 'virtual' symbols that aren't backed by + // an ELF section. This includes symbol references from inline asm, forward + // function declarations, as well as extern kfunc declarations using __ksym + // and extern kconfig variables declared using __kconfig. case undefSection: - if bind != elf.STB_GLOBAL { - return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind) + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) } if typ != elf.STT_NOTYPE { return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ) } - // There is nothing to do here but set ins.Reference. + kf := ec.kfuncs[name] + _, ks := ec.ksyms[name] + + switch { + // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name + // that matches the symbol name we mark the instruction as a referencing a kfunc. 
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call: + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Src = asm.PseudoKfuncCall + ins.Constant = -1 + + case kf != nil && ins.OpCode.IsDWordLoad(): + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Constant = 0 + + case ks && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + ins.Metadata.Set(ksymMetaKey{}, &ksymMeta{ + Binding: bind, + Name: name, + }) + + // If no kconfig map is found, this must be a symbol reference from inline + // asm (see testdata/loader.c:asm_relocation()) or a call to a forward + // function declaration (see testdata/fwd_decl.c). Don't interfere, These + // remain standard symbol references. + // extern __kconfig reads are represented as dword loads that need to be + // rewritten to pseudo map loads from .kconfig. If the map is present, + // require it to contain the symbol to disambiguate between inline asm + // relos and kconfigs. + case ec.kconfig != nil && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars { + if vsi.Type.(*btf.Var).Name != rel.Name { + continue + } + + ins.Src = asm.PseudoMapValue + ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset}) + return nil + } + + return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name) + } default: return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) } - ins.Reference = name + *ins = ins.WithReference(name) return nil } -func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { +func (ec *elfCode) loadMaps() error { for _, sec := range ec.sections { if sec.kind != mapSection { continue @@ -598,6 +725,22 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) } + // If the ELF has BTF, pull out the btf.Var for each map definition to + // extract decl tags from. 
+ varsByName := make(map[string]*btf.Var) + if ec.btf != nil { + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err == nil { + for _, vsi := range ds.Vars { + v, ok := btf.As[*btf.Var](vsi.Type) + if !ok { + return fmt.Errorf("section %v: btf.VarSecInfo doesn't point to a *btf.Var: %T", sec.Name, vsi.Type) + } + varsByName[string(v.Name)] = v + } + } + } + var ( r = bufio.NewReader(sec.Open()) size = sec.Size / uint64(nSym) @@ -609,14 +752,14 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { } mapName := mapSym.Name - if maps[mapName] != nil { + if ec.maps[mapName] != nil { return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) } lr := io.LimitReader(r, int64(size)) spec := MapSpec{ - Name: SanitizeName(mapName, -1), + Name: sanitizeName(mapName, -1), } switch { case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: @@ -639,11 +782,11 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { spec.Extra = bytes.NewReader(extra) } - if err := spec.clampPerfEventArraySize(); err != nil { - return fmt.Errorf("map %s: %w", mapName, err) + if v, ok := varsByName[mapName]; ok { + spec.Tags = slices.Clone(v.Tags) } - maps[mapName] = &spec + ec.maps[mapName] = &spec } } @@ -653,7 +796,7 @@ func (ec *elfCode) loadMaps(maps map[string]*MapSpec) error { // loadBTFMaps iterates over all ELF sections marked as BTF map sections // (like .maps) and parses them into MapSpecs. Dump the .maps section and // any relocations with `readelf -x .maps -r `. -func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { +func (ec *elfCode) loadBTFMaps() error { for _, sec := range ec.sections { if sec.kind != btfMapSection { continue @@ -692,12 +835,12 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) } - if maps[name] != nil { + if ec.maps[name] != nil { return fmt.Errorf("section %v: map %s already exists", sec.Name, name) } // Each Var representing a BTF map definition contains a Struct. - mapStruct, ok := v.Type.(*btf.Struct) + mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct) if !ok { return fmt.Errorf("expected struct, got %s", v.Type) } @@ -707,11 +850,7 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return fmt.Errorf("map %v: %w", name, err) } - if err := mapSpec.clampPerfEventArraySize(); err != nil { - return fmt.Errorf("map %v: %w", name, err) - } - - maps[name] = mapSpec + ec.maps[name] = mapSpec } // Drain the ELF section reader to make sure all bytes are accounted for @@ -728,24 +867,17 @@ func (ec *elfCode) loadBTFMaps(maps map[string]*MapSpec) error { return nil } -// A programStub is a placeholder for a Program to be inserted at a certain map key. -// It needs to be resolved into a Program later on in the loader process. -type programStub string - -// A mapStub is a placeholder for a Map to be inserted at a certain map key. -// It needs to be resolved into a Map later on in the loader process. -type mapStub string - // mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing // a BTF map definition. The name and spec arguments will be copied to the -// resulting MapSpec, and inner must be true on any resursive invocations. +// resulting MapSpec, and inner must be true on any recursive invocations. 
func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { var ( key, value btf.Type - keySize, valueSize uint32 + keySize, valueSize uint64 mapType MapType - flags, maxEntries uint32 + flags, maxEntries uint64 pinType PinType + mapExtra uint64 innerMapSpec *MapSpec contents []MapKV err error @@ -789,7 +921,7 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b return nil, fmt.Errorf("can't get size of BTF key: %w", err) } - keySize = uint32(size) + keySize = uint64(size) case "value": if valueSize != 0 { @@ -808,7 +940,7 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b return nil, fmt.Errorf("can't get size of BTF value: %w", err) } - valueSize = uint32(size) + valueSize = uint64(size) case "key_size": // Key needs to be nil and keySize needs to be 0 for key_size to be @@ -903,46 +1035,71 @@ func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *b return nil, fmt.Errorf("resolving values contents: %w", err) } + case "map_extra": + mapExtra, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("resolving map_extra: %w", err) + } + default: return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) } } - if key == nil { - key = &btf.Void{} + // Some maps don't support value sizes, but annotating their map definitions + // with __type macros can still be useful, especially to let bpf2go generate + // type definitions for them. + if value != nil && !mapType.canHaveValueSize() { + valueSize = 0 } - if value == nil { - value = &btf.Void{} + + v, ok := btf.As[*btf.Var](vs.Type) + if !ok { + return nil, fmt.Errorf("BTF map definition: btf.VarSecInfo doesn't point to a *btf.Var: %T", vs.Type) } return &MapSpec{ - Name: SanitizeName(name, -1), + Name: sanitizeName(name, -1), Type: MapType(mapType), - KeySize: keySize, - ValueSize: valueSize, - MaxEntries: maxEntries, - Flags: flags, - BTF: &btf.Map{Spec: spec, Key: key, Value: value}, + KeySize: uint32(keySize), + ValueSize: uint32(valueSize), + MaxEntries: uint32(maxEntries), + Flags: uint32(flags), + Key: key, + Value: value, Pinning: pinType, InnerMap: innerMapSpec, Contents: contents, + Tags: slices.Clone(v.Tags), + MapExtra: mapExtra, }, nil } -// uintFromBTF resolves the __uint macro, which is a pointer to a sized -// array, e.g. for int (*foo)[10], this function will return 10. -func uintFromBTF(typ btf.Type) (uint32, error) { - ptr, ok := typ.(*btf.Pointer) - if !ok { - return 0, fmt.Errorf("not a pointer: %v", typ) - } +// uintFromBTF resolves the __uint and __ulong macros. +// +// __uint emits a pointer to a sized array. For int (*foo)[10], this function +// will return 10. +// +// __ulong emits an enum with a single value that can represent a 64-bit +// integer. The first (and only) enum value is returned. 
+func uintFromBTF(typ btf.Type) (uint64, error) { + switch t := typ.(type) { + case *btf.Pointer: + arr, ok := t.Target.(*btf.Array) + if !ok { + return 0, fmt.Errorf("not a pointer to array: %v", typ) + } + return uint64(arr.Nelems), nil - arr, ok := ptr.Target.(*btf.Array) - if !ok { - return 0, fmt.Errorf("not a pointer to array: %v", typ) - } + case *btf.Enum: + if len(t.Values) == 0 { + return 0, errors.New("enum has no values") + } + return t.Values[0].Value, nil - return arr.Nelems, nil + default: + return 0, fmt.Errorf("not a pointer or enum: %v", typ) + } } // resolveBTFArrayMacro resolves the __array macro, which declares an array @@ -974,14 +1131,14 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem // The offset of the 'values' member within the _struct_ (in bits) // is the starting point of the array. Convert to bytes. Add VarSecinfo // offset to get the absolute position in the ELF blob. - start := (member.OffsetBits / 8) + vs.Offset + start := member.Offset.Bytes() + vs.Offset // 'values' is encoded in BTF as a zero (variable) length struct // member, and its contents run until the end of the VarSecinfo. // Add VarSecinfo offset to get the absolute position in the ELF blob. end := vs.Size + vs.Offset // The size of an address in this section. This determines the width of // an index in the array. - align := uint32(es.SectionHeader.Addralign) + align := uint32(es.Addralign) // Check if variable-length section is aligned. if (end-start)%align != 0 { @@ -1009,168 +1166,320 @@ func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Mem // skipped here. switch t := elf.ST_TYPE(r.Info); t { case elf.STT_FUNC: - contents = append(contents, MapKV{uint32(k), programStub(r.Name)}) + contents = append(contents, MapKV{uint32(k), r.Name}) case elf.STT_OBJECT: - contents = append(contents, MapKV{uint32(k), mapStub(r.Name)}) + contents = append(contents, MapKV{uint32(k), r.Name}) default: - return nil, fmt.Errorf("unknown relocation type %v", t) + return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name) } } return contents, nil } -func (ec *elfCode) loadDataSections(maps map[string]*MapSpec) error { +func (ec *elfCode) loadDataSections() error { for _, sec := range ec.sections { if sec.kind != dataSection { continue } - if sec.references == 0 { - // Prune data sections which are not referenced by any - // instructions. + // If a section has no references, it will be freed as soon as the + // Collection closes, so creating and populating it is wasteful. If it has + // no symbols, it is likely an ephemeral section used during compilation + // that wasn't sanitized by the bpf linker. (like .rodata.str1.1) + // + // No symbols means no VariableSpecs can be generated from it, making it + // pointless to emit a data section for. 
+ if sec.references == 0 && len(sec.symbols) == 0 { continue } - if ec.btf == nil { - return errors.New("data sections require BTF, make sure all consts are marked as static") + if sec.Size > math.MaxUint32 { + return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) } - var datasec *btf.Datasec - if err := ec.btf.TypeByName(sec.Name, &datasec); err != nil { - return fmt.Errorf("data section %s: can't get BTF: %w", sec.Name, err) + mapSpec := &MapSpec{ + Name: sanitizeName(sec.Name, -1), + Type: Array, + KeySize: 4, + ValueSize: uint32(sec.Size), + MaxEntries: 1, } - data, err := sec.Data() - if err != nil { - return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) + if isConstantDataSection(sec.Name) { + mapSpec.Flags = sys.BPF_F_RDONLY_PROG } - if uint64(len(data)) > math.MaxUint32 { - return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) + switch sec.Type { + // Only open the section if we know there's actual data to be read. + case elf.SHT_PROGBITS: + data, err := sec.Data() + if err != nil { + return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) + } + mapSpec.Contents = []MapKV{{uint32(0), data}} + + case elf.SHT_NOBITS: + // NOBITS sections like .bss contain only zeroes and are not allocated in + // the ELF. Since data sections are Arrays, the kernel can preallocate + // them. Don't attempt reading zeroes from the ELF, instead allocate the + // zeroed memory to support getting and setting VariableSpecs for sections + // like .bss. + mapSpec.Contents = []MapKV{{uint32(0), make([]byte, sec.Size)}} + + default: + return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type) } - mapSpec := &MapSpec{ - Name: SanitizeName(sec.Name, -1), - Type: Array, - KeySize: 4, - ValueSize: uint32(len(data)), - MaxEntries: 1, - Contents: []MapKV{{uint32(0), data}}, - BTF: &btf.Map{Spec: ec.btf, Key: &btf.Void{}, Value: datasec}, + for off, sym := range sec.symbols { + // Skip symbols marked with the 'hidden' attribute. + if elf.ST_VISIBILITY(sym.Other) == elf.STV_HIDDEN || + elf.ST_VISIBILITY(sym.Other) == elf.STV_INTERNAL { + continue + } + + // Only accept symbols with global or weak bindings. The common + // alternative is STB_LOCAL, which are either function-scoped or declared + // 'static'. + if elf.ST_BIND(sym.Info) != elf.STB_GLOBAL && + elf.ST_BIND(sym.Info) != elf.STB_WEAK { + continue + } + + if ec.vars[sym.Name] != nil { + return fmt.Errorf("data section %s: duplicate variable %s", sec.Name, sym.Name) + } + + // Skip symbols starting with a dot, they are compiler-internal symbols + // emitted by clang 11 and earlier and are not cleaned up by the bpf + // compiler backend (e.g. symbols named .Lconstinit.1 in sections like + // .rodata.cst32). Variables in C cannot start with a dot, so filter these + // out. + if strings.HasPrefix(sym.Name, ".") { + continue + } + + ec.vars[sym.Name] = &VariableSpec{ + name: sym.Name, + offset: off, + size: sym.Size, + m: mapSpec, + } } - switch sec.Name { - case ".rodata": - mapSpec.Flags = unix.BPF_F_RDONLY_PROG - mapSpec.Freeze = true - case ".bss": - // The kernel already zero-initializes the map - mapSpec.Contents = nil + // It is possible for a data section to exist without a corresponding BTF Datasec + // if it only contains anonymous values like macro-defined arrays. + if ec.btf != nil { + var ds *btf.Datasec + if ec.btf.TypeByName(sec.Name, &ds) == nil { + // Assign the spec's key and BTF only if the Datasec lookup was successful. 
+ mapSpec.Key = &btf.Void{} + mapSpec.Value = ds + + // Populate VariableSpecs with type information, if available. + for _, v := range ds.Vars { + name := v.Type.TypeName() + if name == "" { + return fmt.Errorf("data section %s: anonymous variable %v", sec.Name, v) + } + + vt, ok := v.Type.(*btf.Var) + if !ok { + return fmt.Errorf("data section %s: unexpected type %T for variable %s", sec.Name, v.Type, name) + } + + ev := ec.vars[name] + if ev == nil { + // Hidden symbols appear in the BTF Datasec but don't receive a VariableSpec. + continue + } + + if uint64(v.Offset) != ev.offset { + return fmt.Errorf("data section %s: variable %s datasec offset (%d) doesn't match ELF symbol offset (%d)", sec.Name, name, v.Offset, ev.offset) + } + + if uint64(v.Size) != ev.size { + return fmt.Errorf("data section %s: variable %s size in datasec (%d) doesn't match ELF symbol size (%d)", sec.Name, name, v.Size, ev.size) + } + + // Decouple the Var in the VariableSpec from the underlying DataSec in + // the MapSpec to avoid modifications from affecting map loads later on. + ev.t = btf.Copy(vt).(*btf.Var) + } + } } - maps[sec.Name] = mapSpec + ec.maps[sec.Name] = mapSpec } + return nil } -func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { - types := map[string]struct { - progType ProgramType - attachType AttachType - progFlags uint32 - }{ - // Please update the types from libbpf.c and follow the order of it. - // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c - "socket": {SocketFilter, AttachNone, 0}, - "sk_reuseport/migrate": {SkReuseport, AttachSkReuseportSelectOrMigrate, 0}, - "sk_reuseport": {SkReuseport, AttachSkReuseportSelect, 0}, - "kprobe/": {Kprobe, AttachNone, 0}, - "uprobe/": {Kprobe, AttachNone, 0}, - "kretprobe/": {Kprobe, AttachNone, 0}, - "uretprobe/": {Kprobe, AttachNone, 0}, - "tc": {SchedCLS, AttachNone, 0}, - "classifier": {SchedCLS, AttachNone, 0}, - "action": {SchedACT, AttachNone, 0}, - "tracepoint/": {TracePoint, AttachNone, 0}, - "tp/": {TracePoint, AttachNone, 0}, - "raw_tracepoint/": {RawTracepoint, AttachNone, 0}, - "raw_tp/": {RawTracepoint, AttachNone, 0}, - "raw_tracepoint.w/": {RawTracepointWritable, AttachNone, 0}, - "raw_tp.w/": {RawTracepointWritable, AttachNone, 0}, - "tp_btf/": {Tracing, AttachTraceRawTp, 0}, - "fentry/": {Tracing, AttachTraceFEntry, 0}, - "fmod_ret/": {Tracing, AttachModifyReturn, 0}, - "fexit/": {Tracing, AttachTraceFExit, 0}, - "fentry.s/": {Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE}, - "fmod_ret.s/": {Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE}, - "fexit.s/": {Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE}, - "freplace/": {Extension, AttachNone, 0}, - "lsm/": {LSM, AttachLSMMac, 0}, - "lsm.s/": {LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE}, - "iter/": {Tracing, AttachTraceIter, 0}, - "syscall": {Syscall, AttachNone, 0}, - "xdp_devmap/": {XDP, AttachXDPDevMap, 0}, - "xdp_cpumap/": {XDP, AttachXDPCPUMap, 0}, - "xdp": {XDP, AttachNone, 0}, - "perf_event": {PerfEvent, AttachNone, 0}, - "lwt_in": {LWTIn, AttachNone, 0}, - "lwt_out": {LWTOut, AttachNone, 0}, - "lwt_xmit": {LWTXmit, AttachNone, 0}, - "lwt_seg6local": {LWTSeg6Local, AttachNone, 0}, - "cgroup_skb/ingress": {CGroupSKB, AttachCGroupInetIngress, 0}, - "cgroup_skb/egress": {CGroupSKB, AttachCGroupInetEgress, 0}, - "cgroup/skb": {CGroupSKB, AttachNone, 0}, - "cgroup/sock_create": {CGroupSKB, AttachCGroupInetSockCreate, 0}, - "cgroup/sock_release": {CGroupSKB, AttachCgroupInetSockRelease, 0}, - "cgroup/sock": 
{CGroupSock, AttachCGroupInetSockCreate, 0}, - "cgroup/post_bind4": {CGroupSock, AttachCGroupInet4PostBind, 0}, - "cgroup/post_bind6": {CGroupSock, AttachCGroupInet6PostBind, 0}, - "cgroup/dev": {CGroupDevice, AttachCGroupDevice, 0}, - "sockops": {SockOps, AttachCGroupSockOps, 0}, - "sk_skb/stream_parser": {SkSKB, AttachSkSKBStreamParser, 0}, - "sk_skb/stream_verdict": {SkSKB, AttachSkSKBStreamVerdict, 0}, - "sk_skb": {SkSKB, AttachNone, 0}, - "sk_msg": {SkMsg, AttachSkMsgVerdict, 0}, - "lirc_mode2": {LircMode2, AttachLircMode2, 0}, - "flow_dissector": {FlowDissector, AttachFlowDissector, 0}, - "cgroup/bind4": {CGroupSockAddr, AttachCGroupInet4Bind, 0}, - "cgroup/bind6": {CGroupSockAddr, AttachCGroupInet6Bind, 0}, - "cgroup/connect4": {CGroupSockAddr, AttachCGroupInet4Connect, 0}, - "cgroup/connect6": {CGroupSockAddr, AttachCGroupInet6Connect, 0}, - "cgroup/sendmsg4": {CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0}, - "cgroup/sendmsg6": {CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0}, - "cgroup/recvmsg4": {CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0}, - "cgroup/recvmsg6": {CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0}, - "cgroup/getpeername4": {CGroupSockAddr, AttachCgroupInet4GetPeername, 0}, - "cgroup/getpeername6": {CGroupSockAddr, AttachCgroupInet6GetPeername, 0}, - "cgroup/getsockname4": {CGroupSockAddr, AttachCgroupInet4GetSockname, 0}, - "cgroup/getsockname6": {CGroupSockAddr, AttachCgroupInet6GetSockname, 0}, - "cgroup/sysctl": {CGroupSysctl, AttachCGroupSysctl, 0}, - "cgroup/getsockopt": {CGroupSockopt, AttachCGroupGetsockopt, 0}, - "cgroup/setsockopt": {CGroupSockopt, AttachCGroupSetsockopt, 0}, - "struct_ops+": {StructOps, AttachNone, 0}, - "sk_lookup/": {SkLookup, AttachSkLookup, 0}, - - "seccomp": {SocketFilter, AttachNone, 0}, +// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't +// have a corresponding ELF section and exist purely in BTF. +func (ec *elfCode) loadKconfigSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".kconfig", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + if ds.Size == 0 { + return errors.New("zero-length .kconfig") } - for prefix, t := range types { - if !strings.HasPrefix(sectionName, prefix) { + ec.kconfig = &MapSpec{ + Name: ".kconfig", + Type: Array, + KeySize: uint32(4), + ValueSize: ds.Size, + MaxEntries: 1, + Flags: sys.BPF_F_RDONLY_PROG, + Key: &btf.Int{Size: 4}, + Value: ds, + } + + return nil +} + +// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't +// have a corresponding ELF section and exist purely in BTF. +func (ec *elfCode) loadKsymsSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".ksyms", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + for _, v := range ds.Vars { + switch t := v.Type.(type) { + case *btf.Func: + ec.kfuncs[t.TypeName()] = t + case *btf.Var: + ec.ksyms[t.TypeName()] = struct{}{} + default: + return fmt.Errorf("unexpected variable type in .ksyms: %T", v) + } + } + + return nil +} + +type libbpfElfSectionDef struct { + pattern string + programType sys.ProgType + attachType sys.AttachType + flags libbpfElfSectionFlag +} + +type libbpfElfSectionFlag uint32 + +// The values correspond to enum sec_def_flags in libbpf. 
+const ( + _SEC_NONE libbpfElfSectionFlag = 0 + + _SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1) + _SEC_ATTACHABLE + _SEC_ATTACH_BTF + _SEC_SLEEPABLE + _SEC_XDP_FRAGS + _SEC_USDT + + // Ignore any present extra in order to preserve backwards compatibility + // with earlier versions of the library. + ignoreExtra + + _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT +) + +func init() { + // Compatibility with older versions of the library. + // We prepend libbpf definitions since they contain a prefix match + // for "xdp". + elfSectionDefs = append([]libbpfElfSectionDef{ + {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra}, + {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0}, + {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0}, + // This has been in the library since the beginning of time. Not sure + // where it came from. + {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + }, elfSectionDefs...) +} + +func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { + // Skip optional program marking for now. + sectionName = strings.TrimPrefix(sectionName, "?") + + for _, t := range elfSectionDefs { + extra, ok := matchSectionName(sectionName, t.pattern) + if !ok { continue } - if !strings.HasSuffix(prefix, "/") { - return t.progType, t.attachType, t.progFlags, "" + programType := ProgramType(t.programType) + attachType := AttachType(t.attachType) + + var flags uint32 + if t.flags&_SEC_SLEEPABLE > 0 { + flags |= sys.BPF_F_SLEEPABLE + } + if t.flags&_SEC_XDP_FRAGS > 0 { + flags |= sys.BPF_F_XDP_HAS_FRAGS + } + if t.flags&_SEC_EXP_ATTACH_OPT > 0 { + if programType == XDP { + // The library doesn't yet have code to fallback to not specifying + // attach type. Only do this for XDP since we've enforced correct + // attach type for all other program types. + attachType = AttachNone + } + } + if t.flags&ignoreExtra > 0 { + extra = "" } - return t.progType, t.attachType, t.progFlags, sectionName[len(prefix):] + return programType, attachType, flags, extra } return UnspecifiedProgram, AttachNone, 0, "" } -func (ec *elfCode) loadRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { +// matchSectionName checks a section name against a pattern. +// +// It's behaviour mirrors that of libbpf's sec_def_matches. +func matchSectionName(sectionName, pattern string) (extra string, found bool) { + have, extra, found := strings.Cut(sectionName, "/") + want := strings.TrimRight(pattern, "+/") + + if strings.HasSuffix(pattern, "/") { + // Section name must have a slash and extra may be empty. + return extra, have == want && found + } else if strings.HasSuffix(pattern, "+") { + // Section name may have a slash and extra may be empty. + return extra, have == want + } + + // Section name must have a prefix. extra is ignored. 
+ return "", strings.HasPrefix(sectionName, pattern) +} + +func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { rels := make(map[uint64]elf.Symbol) if sec.Entsize < 16 { diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/elf_sections.go b/src/nvcgo/vendor/github.com/cilium/ebpf/elf_sections.go new file mode 100644 index 000000000..43dcfb103 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/elf_sections.go @@ -0,0 +1,111 @@ +// Code generated by internal/cmd/gensections.awk; DO NOT EDIT. + +package ebpf + +// Code in this file is derived from libbpf, available under BSD-2-Clause. + +import "github.com/cilium/ebpf/internal/sys" + +var elfSectionDefs = []libbpfElfSectionDef{ + {"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + {"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE}, + {"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE}, + {"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kprobe.session+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_SESSION, _SEC_NONE}, + {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT}, + {"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE}, + {"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE}, + {"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE}, + {"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE}, + {"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF}, + {"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF}, + {"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, 
_SEC_ATTACH_BTF}, + {"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF}, + {"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF}, + {"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF}, + {"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF}, + {"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF}, + {"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE}, + {"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE}, + {"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE}, + {"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS}, + {"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT}, + {"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE}, + {"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE}, + {"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE}, + {"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE}, + {"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE}, + {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb/verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE}, + {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT}, + {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT}, + {"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE}, + {"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE}, + {"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE}, + {"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT}, + {"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/post_bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE}, + {"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect6", 
sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE}, + {"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT}, + {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE}, + {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE}, + {"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE}, + {"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE}, +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/info.go b/src/nvcgo/vendor/github.com/cilium/ebpf/info.go index cf692c762..23c819aaa 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/info.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/info.go @@ -8,64 +8,161 @@ import ( "fmt" "io" "os" - "strings" - "syscall" + "reflect" "time" - "unsafe" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" "github.com/cilium/ebpf/internal/unix" ) +// The *Info structs expose metadata about a program or map. Most +// fields are exposed via a getter: +// +// func (*MapInfo) ID() (MapID, bool) +// +// This is because the metadata available changes based on kernel version. +// The second boolean return value indicates whether a particular field is +// available on the current kernel. +// +// Always add new metadata as such a getter, unless you can somehow get the +// value of the field on all supported kernels. Also document which version +// a particular field first appeared in. +// +// Some metadata is a buffer which needs additional parsing. In this case, +// store the undecoded data in the Info struct and provide a getter which +// decodes it when necessary. 
See ProgramInfo.Instructions for an example. + // MapInfo describes a map. type MapInfo struct { - Type MapType - id MapID - KeySize uint32 - ValueSize uint32 + // Type of the map. + Type MapType + // KeySize is the size of the map key in bytes. + KeySize uint32 + // ValueSize is the size of the map value in bytes. + ValueSize uint32 + // MaxEntries is the maximum number of entries the map can hold. Its meaning + // is map-specific. MaxEntries uint32 - Flags uint32 + // Flags used during map creation. + Flags uint32 // Name as supplied by user space at load time. Available from 4.15. Name string + + id MapID + btf btf.ID + mapExtra uint64 + memlock uint64 + frozen bool } -func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { +// minimalMapInfoFromFd queries the minimum information needed to create a Map +// based on a file descriptor. This requires the map type, key/value sizes, +// maxentries and flags. +// +// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and +// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov +// 2017. +// +// Requires at least Linux 4.13. +func minimalMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { var info sys.MapInfo - err := sys.ObjInfo(fd, &info) - if errors.Is(err, syscall.EINVAL) { - return newMapInfoFromProc(fd) + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting object info: %w", err) } + + typ, err := MapTypeForPlatform(platform.Native, info.Type) if err != nil { - return nil, err + return nil, fmt.Errorf("map type: %w", err) } return &MapInfo{ - MapType(info.Type), - MapID(info.Id), + Type: typ, + KeySize: info.KeySize, + ValueSize: info.ValueSize, + MaxEntries: info.MaxEntries, + Flags: uint32(info.MapFlags), + Name: unix.ByteSliceToString(info.Name[:]), + }, nil +} + +// newMapInfoFromFd queries map information about the given fd. [sys.ObjInfo] is +// attempted first, supplementing any missing values with information from +// /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as ErrNotSupported +// from reading fdinfo (indicating the file exists, but no fields of interest +// were found). If both fail, an error is always returned. +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. + if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) + } + + typ, err := MapTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("map type: %w", err) + } + + mi := &MapInfo{ + typ, info.KeySize, info.ValueSize, info.MaxEntries, - info.MapFlags, + uint32(info.MapFlags), unix.ByteSliceToString(info.Name[:]), - }, nil + MapID(info.Id), + btf.ID(info.BtfId), + info.MapExtra, + 0, + false, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock and frozen that are not present in OBJ_INFO. + err2 := readMapInfoFromProc(fd, mi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + return mi, nil } -func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) { - var mi MapInfo +// readMapInfoFromProc queries map information about the given fd from +// /proc/self/fdinfo. 
It only writes data into fields that have a zero value. +func readMapInfoFromProc(fd *sys.FD, mi *MapInfo) error { + var mapType uint32 err := scanFdInfo(fd, map[string]interface{}{ - "map_type": &mi.Type, + "map_type": &mapType, + "map_id": &mi.id, "key_size": &mi.KeySize, "value_size": &mi.ValueSize, "max_entries": &mi.MaxEntries, "map_flags": &mi.Flags, + "map_extra": &mi.mapExtra, + "memlock": &mi.memlock, + "frozen": &mi.frozen, }) if err != nil { - return nil, err + return err } - return &mi, nil + + if mi.Type == 0 { + mi.Type, err = MapTypeForPlatform(platform.Linux, mapType) + if err != nil { + return fmt.Errorf("map type: %w", err) + } + } + + return nil } // ID returns the map ID. @@ -77,15 +174,120 @@ func (mi *MapInfo) ID() (MapID, bool) { return mi.id, mi.id > 0 } -// programStats holds statistics of a program. -type programStats struct { - // Total accumulated runtime of the program ins ns. - runtime time.Duration - // Total number of times the program was called. - runCount uint64 +// BTFID returns the BTF ID associated with the Map. +// +// The ID is only valid as long as the associated Map is kept alive. +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the Map was loaded without BTF information.) +func (mi *MapInfo) BTFID() (btf.ID, bool) { + return mi.btf, mi.btf > 0 +} + +// MapExtra returns an opaque field whose meaning is map-specific. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available and +// populated, if it was specified during Map creation. +func (mi *MapInfo) MapExtra() (uint64, bool) { + return mi.mapExtra, mi.mapExtra > 0 +} + +// Memlock returns an approximate number of bytes allocated to this map. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) Memlock() (uint64, bool) { + return mi.memlock, mi.memlock > 0 +} + +// Frozen indicates whether [Map.Freeze] was called on this map. If true, +// modifications from user space are not allowed. +// +// Available from 5.2. Requires access to procfs. +// +// If the kernel doesn't support map freezing, this field will always be false. +func (mi *MapInfo) Frozen() bool { + return mi.frozen +} + +// ProgramStats contains runtime statistics for a single [Program], returned by +// [Program.Stats]. +// +// Will contain mostly zero values if the collection of statistics is not +// enabled, see [EnableStats]. +type ProgramStats struct { + // Total accumulated runtime of the Program. + // + // Requires at least Linux 5.8. + Runtime time.Duration + + // Total number of times the Program has executed. + // + // Requires at least Linux 5.8. + RunCount uint64 + + // Total number of times the program was not executed due to recursion. This + // can happen when another bpf program is already running on the cpu, when bpf + // program execution is interrupted, for example. + // + // Requires at least Linux 5.12. 
+ RecursionMisses uint64 +} + +func newProgramStatsFromFd(fd *sys.FD) (*ProgramStats, error) { + var info sys.ProgInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting program info: %w", err) + } + + return &ProgramStats{ + Runtime: time.Duration(info.RunTimeNs), + RunCount: info.RunCnt, + RecursionMisses: info.RecursionMisses, + }, nil +} + +// programJitedInfo holds information about JITed info of a program. +type programJitedInfo struct { + // ksyms holds the ksym addresses of the BPF program, including those of its + // subprograms. + // + // Available from 4.18. + ksyms []uint64 + numKsyms uint32 + + // insns holds the JITed machine native instructions of the program, + // including those of its subprograms. + // + // Available from 4.13. + insns []byte + numInsns uint32 + + // lineInfos holds the JITed line infos, which are kernel addresses. + // + // Available from 5.0. + lineInfos []uint64 + numLineInfos uint32 + + // lineInfoRecSize is the size of a single line info record. + // + // Available from 5.0. + lineInfoRecSize uint32 + + // funcLens holds the insns length of each function. + // + // Available from 4.18. + funcLens []uint32 + numFuncLens uint32 } -// ProgramInfo describes a program. +// ProgramInfo describes a Program's immutable metadata. For runtime statistics, +// see [ProgramStats]. type ProgramInfo struct { Type ProgramType id ProgramID @@ -94,76 +296,223 @@ type ProgramInfo struct { // Name as supplied by user space at load time. Available from 4.15. Name string - btf btf.ID - stats *programStats + createdByUID uint32 + haveCreatedByUID bool + btf btf.ID + loadTime time.Duration + + restricted bool + + maps []MapID + insns []byte + jitedSize uint32 + verifiedInstructions uint32 + + jitedInfo programJitedInfo + + lineInfos []byte + numLineInfos uint32 + funcInfos []byte + numFuncInfos uint32 + + memlock uint64 +} + +// minimalProgramFromFd queries the minimum information needed to create a +// Program based on a file descriptor, requiring at least the program type. +// +// Does not fall back to fdinfo since the version gap between fdinfo (4.10) and +// [sys.ObjInfo] (4.13) is small and both kernels are EOL since at least Nov +// 2017. +// +// Requires at least Linux 4.13. +func minimalProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + if err := sys.ObjInfo(fd, &info); err != nil { + return nil, fmt.Errorf("getting object info: %w", err) + } + + typ, err := ProgramTypeForPlatform(platform.Native, info.Type) + if err != nil { + return nil, fmt.Errorf("program type: %w", err) + } - maps []MapID - insns []byte + return &ProgramInfo{ + Type: typ, + Name: unix.ByteSliceToString(info.Name[:]), + }, nil } +// newProgramInfoFromFd queries program information about the given fd. +// +// [sys.ObjInfo] is attempted first, supplementing any missing values with +// information from /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as +// ErrNotSupported from reading fdinfo (indicating the file exists, but no +// fields of interest were found). If both fail, an error is always returned. func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { var info sys.ProgInfo - err := sys.ObjInfo(fd, &info) - if errors.Is(err, syscall.EINVAL) { - return newProgramInfoFromProc(fd) + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. 
+ if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) } + + typ, err := ProgramTypeForPlatform(platform.Native, info.Type) if err != nil { - return nil, err + return nil, fmt.Errorf("program type: %w", err) } pi := ProgramInfo{ - Type: ProgramType(info.Type), - id: ProgramID(info.Id), - Tag: hex.EncodeToString(info.Tag[:]), - Name: unix.ByteSliceToString(info.Name[:]), - btf: btf.ID(info.BtfId), - stats: &programStats{ - runtime: time.Duration(info.RunTimeNs), - runCount: info.RunCnt, - }, + Type: typ, + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + jitedSize: info.JitedProgLen, + loadTime: time.Duration(info.LoadTime), + verifiedInstructions: info.VerifiedInsns, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock that is not present in OBJ_INFO. + err2 := readProgramInfoFromProc(fd, &pi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + if platform.IsWindows && info.Tag == [8]uint8{} { + // Windows doesn't support the tag field, clear it for now. + pi.Tag = "" } // Start with a clean struct for the second call, otherwise we may get EFAULT. var info2 sys.ProgInfo + makeSecondCall := false + if info.NrMapIds > 0 { pi.maps = make([]MapID, info.NrMapIds) info2.NrMapIds = info.NrMapIds - info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0])) + info2.MapIds = sys.SlicePointer(pi.maps) + makeSecondCall = true + } else if haveProgramInfoMapIDs() == nil { + // This program really has no associated maps. + pi.maps = make([]MapID, 0) + } else { + // The kernel doesn't report associated maps. + pi.maps = nil + } + + // createdByUID and NrMapIds were introduced in the same kernel version. 
+ if pi.maps != nil && platform.IsLinux { + pi.createdByUID = info.CreatedByUid + pi.haveCreatedByUID = true } if info.XlatedProgLen > 0 { pi.insns = make([]byte, info.XlatedProgLen) info2.XlatedProgLen = info.XlatedProgLen - info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + info2.XlatedProgInsns = sys.SlicePointer(pi.insns) + makeSecondCall = true + } + + if info.NrLineInfo > 0 { + pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo) + info2.LineInfo = sys.SlicePointer(pi.lineInfos) + info2.LineInfoRecSize = btf.LineInfoSize + info2.NrLineInfo = info.NrLineInfo + pi.numLineInfos = info.NrLineInfo + makeSecondCall = true + } + + if info.NrFuncInfo > 0 { + pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo) + info2.FuncInfo = sys.SlicePointer(pi.funcInfos) + info2.FuncInfoRecSize = btf.FuncInfoSize + info2.NrFuncInfo = info.NrFuncInfo + pi.numFuncInfos = info.NrFuncInfo + makeSecondCall = true + } + + pi.jitedInfo.lineInfoRecSize = info.JitedLineInfoRecSize + if info.JitedProgLen > 0 { + pi.jitedInfo.numInsns = info.JitedProgLen + pi.jitedInfo.insns = make([]byte, info.JitedProgLen) + info2.JitedProgLen = info.JitedProgLen + info2.JitedProgInsns = sys.SlicePointer(pi.jitedInfo.insns) + makeSecondCall = true + } + + if info.NrJitedFuncLens > 0 { + pi.jitedInfo.numFuncLens = info.NrJitedFuncLens + pi.jitedInfo.funcLens = make([]uint32, info.NrJitedFuncLens) + info2.NrJitedFuncLens = info.NrJitedFuncLens + info2.JitedFuncLens = sys.SlicePointer(pi.jitedInfo.funcLens) + makeSecondCall = true + } + + if info.NrJitedLineInfo > 0 { + pi.jitedInfo.numLineInfos = info.NrJitedLineInfo + pi.jitedInfo.lineInfos = make([]uint64, info.NrJitedLineInfo) + info2.NrJitedLineInfo = info.NrJitedLineInfo + info2.JitedLineInfo = sys.SlicePointer(pi.jitedInfo.lineInfos) + info2.JitedLineInfoRecSize = info.JitedLineInfoRecSize + makeSecondCall = true + } + + if info.NrJitedKsyms > 0 { + pi.jitedInfo.numKsyms = info.NrJitedKsyms + pi.jitedInfo.ksyms = make([]uint64, info.NrJitedKsyms) + info2.JitedKsyms = sys.SlicePointer(pi.jitedInfo.ksyms) + info2.NrJitedKsyms = info.NrJitedKsyms + makeSecondCall = true } - if info.NrMapIds > 0 || info.XlatedProgLen > 0 { + if makeSecondCall { if err := sys.ObjInfo(fd, &info2); err != nil { return nil, err } } + if info.XlatedProgLen > 0 && info2.XlatedProgInsns.IsNil() { + pi.restricted = true + pi.insns = nil + pi.lineInfos = nil + pi.funcInfos = nil + pi.jitedInfo = programJitedInfo{} + } + return &pi, nil } -func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { - var info ProgramInfo +func readProgramInfoFromProc(fd *sys.FD, pi *ProgramInfo) error { + var progType uint32 err := scanFdInfo(fd, map[string]interface{}{ - "prog_type": &info.Type, - "prog_tag": &info.Tag, + "prog_type": &progType, + "prog_tag": &pi.Tag, + "memlock": &pi.memlock, }) - if errors.Is(err, errMissingFields) { - return nil, &internal.UnsupportedFeatureError{ + if errors.Is(err, ErrNotSupported) && !errors.Is(err, internal.ErrNotSupportedOnOS) { + return &internal.UnsupportedFeatureError{ Name: "reading program info from /proc/self/fdinfo", MinimumVersion: internal.Version{4, 10, 0}, } } if err != nil { - return nil, err + return err + } + + pi.Type, err = ProgramTypeForPlatform(platform.Linux, progType) + if err != nil { + return fmt.Errorf("program type: %w", err) } - return &info, nil + return nil } // ID returns the program ID. 
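
Note (editorial, not part of the upstream diff): the rewritten info API above drops the old RunCount()/Runtime() getters in favour of exported ProgramStats fields and exposes optional metadata through comma-ok getters, where the boolean reports whether the running kernel (or /proc/self/fdinfo) provides the value. The following is a minimal sketch of how a caller might consume that pattern after this vendor bump; it assumes the library's public ebpf.NewMap / Map.Info API at this version and a BPF-capable kernel with CAP_BPF (or root) so the map can actually be created.

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Create a throwaway array map so there is a file descriptor to query.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	info, err := m.Info()
	if err != nil {
		log.Fatal(err)
	}

	// Plain struct fields (Type, KeySize, ValueSize, MaxEntries, Flags, Name)
	// are filled in directly.
	fmt.Println("type:", info.Type, "max entries:", info.MaxEntries)

	// Optional fields follow the comma-ok convention documented in the
	// comment block above: the boolean reports whether the field is
	// available on the running kernel.
	if id, ok := info.ID(); ok {
		fmt.Println("map id:", id)
	}
	if memlock, ok := info.Memlock(); ok {
		fmt.Println("approx. memlock bytes:", memlock)
	}
	fmt.Println("frozen:", info.Frozen())
}
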
@@ -175,8 +524,18 @@ func (pi *ProgramInfo) ID() (ProgramID, bool) { return pi.id, pi.id > 0 } +// CreatedByUID returns the Uid that created the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) CreatedByUID() (uint32, bool) { + return pi.createdByUID, pi.haveCreatedByUID +} + // BTFID returns the BTF ID associated with the program. // +// The ID is only valid as long as the associated program is kept alive. // Available from 5.0. // // The bool return value indicates whether this optional field is available and @@ -186,26 +545,61 @@ func (pi *ProgramInfo) BTFID() (btf.ID, bool) { return pi.btf, pi.btf > 0 } -// RunCount returns the total number of times the program was called. -// -// Can return 0 if the collection of statistics is not enabled. See EnableStats(). -// The bool return value indicates whether this optional field is available. -func (pi *ProgramInfo) RunCount() (uint64, bool) { - if pi.stats != nil { - return pi.stats.runCount, true +// btfSpec returns the BTF spec associated with the program. +func (pi *ProgramInfo) btfSpec() (*btf.Spec, error) { + id, ok := pi.BTFID() + if !ok { + return nil, fmt.Errorf("program created without BTF or unsupported kernel: %w", ErrNotSupported) + } + + h, err := btf.NewHandleFromID(id) + if err != nil { + return nil, fmt.Errorf("get BTF handle: %w", err) } - return 0, false + defer h.Close() + + spec, err := h.Spec(nil) + if err != nil { + return nil, fmt.Errorf("get BTF spec: %w", err) + } + + return spec, nil } -// Runtime returns the total accumulated runtime of the program. +// ErrRestrictedKernel is returned when kernel address information is restricted +// by kernel.kptr_restrict and/or net.core.bpf_jit_harden sysctls. +var ErrRestrictedKernel = internal.ErrRestrictedKernel + +// LineInfos returns the BTF line information of the program. // -// Can return 0 if the collection of statistics is not enabled. See EnableStats(). -// The bool return value indicates whether this optional field is available. -func (pi *ProgramInfo) Runtime() (time.Duration, bool) { - if pi.stats != nil { - return pi.stats.runtime, true +// Available from 5.0. +// +// Returns an error wrapping [ErrRestrictedKernel] if line infos are restricted +// by sysctls. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) LineInfos() (btf.LineOffsets, error) { + if pi.restricted { + return nil, fmt.Errorf("line infos: %w", ErrRestrictedKernel) + } + + if len(pi.lineInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err } - return time.Duration(0), false + + return btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) } // Instructions returns the 'xlated' instruction stream of the program @@ -214,12 +608,32 @@ func (pi *ProgramInfo) Runtime() (time.Duration, bool) { // inspecting loaded programs for troubleshooting, dumping, etc. // // For example, map accesses are made to reference their kernel map IDs, -// not the FDs they had when the program was inserted. +// not the FDs they had when the program was inserted. 
Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. // // The first instruction is marked as a symbol using the Program's name. // -// Available from 4.13. Requires CAP_BPF or equivalent. +// If available, the instructions will be annotated with metadata from the +// BTF. This includes line information and function information. Reading +// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is +// unavailable, the instructions will be returned without metadata. +// +// Returns an error wrapping [ErrRestrictedKernel] if instructions are +// restricted by sysctls. +// +// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions. +// Requires CAP_SYS_ADMIN for instructions with metadata. func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + if platform.IsWindows && len(pi.insns) == 0 { + return nil, fmt.Errorf("read instructions: %w", internal.ErrNotSupportedOnOS) + } + + if pi.restricted { + return nil, fmt.Errorf("instructions: %w", ErrRestrictedKernel) + } + // If the calling process is not BPF-capable or if the kernel doesn't // support getting xlated instructions, the field will be zero. if len(pi.insns) == 0 { @@ -227,14 +641,99 @@ func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { } r := bytes.NewReader(pi.insns) - var insns asm.Instructions - if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + insns, err := asm.AppendInstructions(nil, r, internal.NativeEndian, platform.Native) + if err != nil { return nil, fmt.Errorf("unmarshaling instructions: %w", err) } - // Tag the first instruction with the name of the program, if available. - insns[0] = insns[0].Sym(pi.Name) + if pi.btf != 0 { + btfh, err := btf.NewHandleFromID(pi.btf) + if err != nil { + // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM. + // Ignore it and fall back to instructions without metadata. + if !errors.Is(err, unix.EPERM) { + return nil, fmt.Errorf("unable to get BTF handle: %w", err) + } + } + + // If we have a BTF handle, we can use it to assign metadata to the instructions. + if btfh != nil { + defer btfh.Close() + + spec, err := btfh.Spec(nil) + if err != nil { + return nil, fmt.Errorf("unable to get BTF spec: %w", err) + } + + lineInfos, err := btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse line info: %w", err) + } + + funcInfos, err := btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse func info: %w", err) + } + + btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{}) + } + } + + fn := btf.FuncMetadata(&insns[0]) + name := pi.Name + if fn != nil { + name = fn.Name + } + insns[0] = insns[0].WithSymbol(name) + + return insns, nil +} + +// JitedSize returns the size of the program's JIT-compiled machine code in +// bytes, which is the actual code executed on the host's CPU. This field +// requires the BPF JIT compiler to be enabled. +// +// Returns an error wrapping [ErrRestrictedKernel] if jited program size is +// restricted by sysctls. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. 
+func (pi *ProgramInfo) JitedSize() (uint32, error) { + if pi.restricted { + return 0, fmt.Errorf("jited size: %w", ErrRestrictedKernel) + } + + if pi.jitedSize == 0 { + return 0, fmt.Errorf("insufficient permissions, unsupported kernel, or JIT compiler disabled: %w", ErrNotSupported) + } + return pi.jitedSize, nil +} + +// TranslatedSize returns the size of the program's translated instructions in +// bytes, after it has been verified and rewritten by the kernel. +// +// Returns an error wrapping [ErrRestrictedKernel] if translated instructions +// are restricted by sysctls. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) TranslatedSize() (int, error) { + if pi.restricted { + return 0, fmt.Errorf("xlated size: %w", ErrRestrictedKernel) + } + insns := len(pi.insns) + if insns == 0 { + return 0, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } return insns, nil } @@ -247,7 +746,127 @@ func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { return pi.maps, pi.maps != nil } +// LoadTime returns when the program was loaded since boot time. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) LoadTime() (time.Duration, bool) { + // loadTime and NrMapIds were introduced in the same kernel version. + return pi.loadTime, pi.loadTime > 0 +} + +// VerifiedInstructions returns the number verified instructions in the program. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) VerifiedInstructions() (uint32, bool) { + return pi.verifiedInstructions, pi.verifiedInstructions > 0 +} + +// JitedKsymAddrs returns the ksym addresses of the BPF program, including its +// subprograms. The addresses correspond to their symbols in /proc/kallsyms. +// +// Available from 4.18. Note that before 5.x, this field can be empty for +// programs without subprograms (bpf2bpf calls). +// +// The bool return value indicates whether this optional field is available. +// +// When a kernel address can't fit into uintptr (which is usually the case when +// running 32 bit program on a 64 bit kernel), this returns an empty slice and +// a false. +func (pi *ProgramInfo) JitedKsymAddrs() ([]uintptr, bool) { + ksyms := make([]uintptr, 0, len(pi.jitedInfo.ksyms)) + if cap(ksyms) == 0 { + return ksyms, false + } + // Check if a kernel address fits into uintptr (it might not when + // using a 32 bit binary on a 64 bit kernel). This check should work + // with any kernel address, since they have 1s at the highest bits. + if a := pi.jitedInfo.ksyms[0]; uint64(uintptr(a)) != a { + return nil, false + } + for _, ksym := range pi.jitedInfo.ksyms { + ksyms = append(ksyms, uintptr(ksym)) + } + return ksyms, true +} + +// JitedInsns returns the JITed machine native instructions of the program. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedInsns() ([]byte, bool) { + return pi.jitedInfo.insns, len(pi.jitedInfo.insns) > 0 +} + +// JitedLineInfos returns the JITed line infos of the program. +// +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available. 
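Two error conventions coexist in this file after the change: size accessors such as JitedSize and TranslatedSize return wrapped sentinel errors (ErrRestrictedKernel, ErrNotSupported), while getters such as LoadTime keep the (value, ok) shape. A small sketch of telling the cases apart; the function name reportSizes is invented for illustration:

package progdump

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
)

// reportSizes shows how the sentinel-error accessors and the (value, ok)
// getters added above are meant to be consumed.
func reportSizes(info *ebpf.ProgramInfo) {
	jited, err := info.JitedSize()
	switch {
	case errors.Is(err, ebpf.ErrRestrictedKernel):
		fmt.Println("jited size hidden by kptr_restrict / bpf_jit_harden")
	case errors.Is(err, ebpf.ErrNotSupported):
		fmt.Println("jited size unavailable (old kernel, missing caps, or JIT disabled)")
	case err == nil:
		fmt.Printf("jited size: %d bytes\n", jited)
	}

	if xlated, err := info.TranslatedSize(); err == nil {
		fmt.Printf("xlated size: %d bytes\n", xlated)
	}

	if loaded, ok := info.LoadTime(); ok {
		fmt.Printf("loaded %v after boot\n", loaded)
	}
}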
+func (pi *ProgramInfo) JitedLineInfos() ([]uint64, bool) { + return pi.jitedInfo.lineInfos, len(pi.jitedInfo.lineInfos) > 0 +} + +// JitedFuncLens returns the insns length of each function in the JITed program. +// +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedFuncLens() ([]uint32, bool) { + return pi.jitedInfo.funcLens, len(pi.jitedInfo.funcLens) > 0 +} + +// FuncInfos returns the offset and function information of all (sub)programs in +// a BPF program. +// +// Available from 5.0. +// +// Returns an error wrapping [ErrRestrictedKernel] if function information is +// restricted by sysctls. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) FuncInfos() (btf.FuncOffsets, error) { + if pi.restricted { + return nil, fmt.Errorf("func infos: %w", ErrRestrictedKernel) + } + + if len(pi.funcInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err + } + + return btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) +} + +// ProgramInfo returns an approximate number of bytes allocated to this program. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Memlock() (uint64, bool) { + return pi.memlock, pi.memlock > 0 +} + func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + if platform.IsWindows { + return fmt.Errorf("read fdinfo: %w", internal.ErrNotSupportedOnOS) + } + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) if err != nil { return err @@ -260,54 +879,73 @@ func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { return nil } -var errMissingFields = errors.New("missing fields") - func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { var ( scanner = bufio.NewScanner(r) scanned int + reader bytes.Reader ) for scanner.Scan() { - parts := strings.SplitN(scanner.Text(), "\t", 2) - if len(parts) != 2 { + key, rest, found := bytes.Cut(scanner.Bytes(), []byte(":")) + if !found { + // Line doesn't contain a colon, skip. continue } - - name := strings.TrimSuffix(parts[0], ":") - field, ok := fields[string(name)] + field, ok := fields[string(key)] if !ok { continue } + // If field already contains a non-zero value, don't overwrite it with fdinfo. + if !zero(field) { + scanned++ + continue + } - if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { - return fmt.Errorf("can't parse field %s: %v", name, err) + // Cut the \t following the : as well as any potential trailing whitespace. + rest = bytes.TrimSpace(rest) + + reader.Reset(rest) + if n, err := fmt.Fscan(&reader, field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", key, err) } scanned++ } if err := scanner.Err(); err != nil { - return err + return fmt.Errorf("scanning fdinfo: %w", err) } if len(fields) > 0 && scanned == 0 { return ErrNotSupported } - if scanned != len(fields) { - return errMissingFields + return nil +} + +func zero(arg any) bool { + v := reflect.ValueOf(arg) + + // Unwrap pointers and interfaces. 
+ for v.Kind() == reflect.Pointer || + v.Kind() == reflect.Interface { + v = v.Elem() } - return nil + return v.IsZero() } -// EnableStats starts the measuring of the runtime -// and run counts of eBPF programs. +// EnableStats starts collecting runtime statistics of eBPF programs, like the +// amount of program executions and the cumulative runtime. +// +// Specify a BPF_STATS_* constant to select which statistics to collect, like +// [unix.BPF_STATS_RUN_TIME]. Closing the returned [io.Closer] will stop +// collecting statistics. // -// Collecting statistics can have an impact on the performance. +// Collecting statistics may have a performance impact. // -// Requires at least 5.8. +// Requires at least Linux 5.8. func EnableStats(which uint32) (io.Closer, error) { fd, err := sys.EnableStats(&sys.EnableStatsAttr{ Type: which, @@ -317,3 +955,35 @@ func EnableStats(which uint32) (io.Closer, error) { } return fd, nil } + +var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + + prog, err := progLoad(asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, SocketFilter, "MIT") + if err != nil { + return err + } + defer prog.Close() + + err = sys.ObjInfo(prog, &sys.ProgInfo{ + // NB: Don't need to allocate MapIds since the program isn't using + // any maps. + NrMapIds: 1, + }) + if errors.Is(err, unix.EINVAL) { + // Most likely the syscall doesn't exist. + return internal.ErrNotSupported + } + if errors.Is(err, unix.E2BIG) { + // We've hit check_uarg_tail_zero on older kernels. + return internal.ErrNotSupported + } + + return err +}, "4.15", "windows:0.21.0") diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/align.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/align.go deleted file mode 100644 index 8b4f2658e..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/align.go +++ /dev/null @@ -1,6 +0,0 @@ -package internal - -// Align returns 'n' updated to 'alignment' boundary. -func Align(n, alignment int) int { - return (int(n) + alignment - 1) / alignment * alignment -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf.go deleted file mode 100644 index 4b885599c..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf.go +++ /dev/null @@ -1,891 +0,0 @@ -package btf - -import ( - "bytes" - "debug/elf" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - "reflect" - "sync" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/sys" - "github.com/cilium/ebpf/internal/unix" -) - -const btfMagic = 0xeB9F - -// Errors returned by BTF functions. -var ( - ErrNotSupported = internal.ErrNotSupported - ErrNotFound = errors.New("not found") - ErrNoExtendedInfo = errors.New("no extended info") -) - -// ID represents the unique ID of a BTF object. -type ID uint32 - -// Spec represents decoded BTF. -type Spec struct { - // Data from .BTF. - rawTypes []rawType - strings stringTable - - // Inflated Types. - types []Type - - // Types indexed by essential name. - // Includes all struct flavors and types with the same name. - namedTypes map[essentialName][]Type - - // Data from .BTF.ext. 
- funcInfos map[string]FuncInfo - lineInfos map[string]LineInfos - coreRelos map[string]CoreRelos - - byteOrder binary.ByteOrder -} - -type btfHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - HdrLen uint32 - - TypeOff uint32 - TypeLen uint32 - StringOff uint32 - StringLen uint32 -} - -// typeStart returns the offset from the beginning of the .BTF section -// to the start of its type entries. -func (h *btfHeader) typeStart() int64 { - return int64(h.HdrLen + h.TypeOff) -} - -// stringStart returns the offset from the beginning of the .BTF section -// to the start of its string table. -func (h *btfHeader) stringStart() int64 { - return int64(h.HdrLen + h.StringOff) -} - -// LoadSpecFromReader reads from an ELF or a raw BTF blob. -// -// Returns ErrNotFound if reading from an ELF which contains no BTF. -func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { - file, err := internal.NewSafeELFFile(rd) - if err != nil { - if bo := guessRawBTFByteOrder(rd); bo != nil { - // Try to parse a naked BTF blob. This will return an error if - // we encounter a Datasec, since we can't fix it up. - return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil, nil) - } - - return nil, err - } - defer file.Close() - - symbols, err := file.Symbols() - if err != nil { - return nil, fmt.Errorf("can't read symbols: %v", err) - } - - variableOffsets := make(map[variable]uint32) - for _, symbol := range symbols { - if idx := symbol.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { - // Ignore things like SHN_ABS - continue - } - - if int(symbol.Section) >= len(file.Sections) { - return nil, fmt.Errorf("symbol %s: invalid section %d", symbol.Name, symbol.Section) - } - - secName := file.Sections[symbol.Section].Name - if symbol.Value > math.MaxUint32 { - return nil, fmt.Errorf("section %s: symbol %s: size exceeds maximum", secName, symbol.Name) - } - - variableOffsets[variable{secName, symbol.Name}] = uint32(symbol.Value) - } - - return loadSpecFromELF(file, variableOffsets) -} - -func loadSpecFromELF(file *internal.SafeELFFile, variableOffsets map[variable]uint32) (*Spec, error) { - var ( - btfSection *elf.Section - btfExtSection *elf.Section - sectionSizes = make(map[string]uint32) - ) - - for _, sec := range file.Sections { - switch sec.Name { - case ".BTF": - btfSection = sec - case ".BTF.ext": - btfExtSection = sec - default: - if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { - break - } - - if sec.Size > math.MaxUint32 { - return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) - } - - sectionSizes[sec.Name] = uint32(sec.Size) - } - } - - if btfSection == nil { - return nil, fmt.Errorf("btf: %w", ErrNotFound) - } - - spec, err := loadRawSpec(btfSection.Open(), file.ByteOrder, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - if btfExtSection == nil { - return spec, nil - } - - if btfExtSection.ReaderAt == nil { - return nil, fmt.Errorf("compressed ext_info is not supported") - } - - extInfo, err := loadExtInfos(btfExtSection, file.ByteOrder, spec.strings) - if err != nil { - return nil, fmt.Errorf("can't parse ext info: %w", err) - } - - if err := spec.splitExtInfos(extInfo); err != nil { - return nil, fmt.Errorf("linking funcInfos and lineInfos: %w", err) - } - - return spec, nil -} - -// splitExtInfos takes FuncInfos, LineInfos and CoreRelos indexed by section and -// transforms them to be indexed by function. Retrieves function names from -// the BTF spec. 
-func (spec *Spec) splitExtInfos(info *extInfo) error { - - ofi := make(map[string]FuncInfo) - oli := make(map[string]LineInfos) - ocr := make(map[string]CoreRelos) - - for secName, secFuncs := range info.funcInfos { - // Collect functions from each section and organize them by name. - for _, fi := range secFuncs { - name, err := fi.Name(spec) - if err != nil { - return fmt.Errorf("looking up function name: %w", err) - } - - // FuncInfo offsets are scoped to the ELF section. Zero them out - // since they are meaningless outside of that context. The linker - // will determine the offset of the function within the final - // instruction stream before handing it off to the kernel. - fi.InsnOff = 0 - - ofi[name] = fi - } - - // Attribute LineInfo records to their respective functions, if any. - if lines := info.lineInfos[secName]; lines != nil { - for _, li := range lines { - fi := secFuncs.funcForOffset(li.InsnOff) - if fi == nil { - return fmt.Errorf("section %s: error looking up FuncInfo for LineInfo %v", secName, li) - } - - // Offsets are ELF section-scoped, make them function-scoped by - // subtracting the function's start offset. - li.InsnOff -= fi.InsnOff - - name, err := fi.Name(spec) - if err != nil { - return fmt.Errorf("looking up function name: %w", err) - } - - oli[name] = append(oli[name], li) - } - } - - // Attribute CO-RE relocations to their respective functions, if any. - if relos := info.relos[secName]; relos != nil { - for _, r := range relos { - fi := secFuncs.funcForOffset(r.insnOff) - if fi == nil { - return fmt.Errorf("section %s: error looking up FuncInfo for CO-RE relocation %v", secName, r) - } - - // Offsets are ELF section-scoped, make them function-scoped by - // subtracting the function's start offset. - r.insnOff -= fi.InsnOff - - name, err := fi.Name(spec) - if err != nil { - return fmt.Errorf("looking up function name: %w", err) - } - - ocr[name] = append(ocr[name], r) - } - } - } - - spec.funcInfos = ofi - spec.lineInfos = oli - spec.coreRelos = ocr - - return nil -} - -func loadRawSpec(btf io.Reader, bo binary.ByteOrder, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) (*Spec, error) { - rawTypes, rawStrings, err := parseBTF(btf, bo) - if err != nil { - return nil, err - } - - err = fixupDatasec(rawTypes, rawStrings, sectionSizes, variableOffsets) - if err != nil { - return nil, err - } - - types, typesByName, err := inflateRawTypes(rawTypes, rawStrings) - if err != nil { - return nil, err - } - - return &Spec{ - rawTypes: rawTypes, - namedTypes: typesByName, - types: types, - strings: rawStrings, - byteOrder: bo, - }, nil -} - -var kernelBTF struct { - sync.Mutex - *Spec -} - -// LoadKernelSpec returns the current kernel's BTF information. -// -// Requires a >= 5.5 kernel with CONFIG_DEBUG_INFO_BTF enabled. Returns -// ErrNotSupported if BTF is not enabled. 
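The removed loader documented above resolves kernel BTF by first trying /sys/kernel/btf/vmlinux and then the same on-disk locations libbpf probes. A quick standalone check of whether the fast path exists on the current machine, purely illustrative and not part of the vendored code:

package main

import (
	"fmt"
	"os"
)

func main() {
	// The path checked first by loadKernelSpec; requires a kernel built
	// with CONFIG_DEBUG_INFO_BTF=y.
	const vmlinuxBTF = "/sys/kernel/btf/vmlinux"

	if _, err := os.Stat(vmlinuxBTF); err != nil {
		fmt.Printf("%s not available (%v); the loader would fall back to the /boot and /usr/lib/debug locations\n", vmlinuxBTF, err)
		return
	}
	fmt.Printf("kernel BTF exposed at %s\n", vmlinuxBTF)
}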
-func LoadKernelSpec() (*Spec, error) { - kernelBTF.Lock() - defer kernelBTF.Unlock() - - if kernelBTF.Spec != nil { - return kernelBTF.Spec, nil - } - - var err error - kernelBTF.Spec, err = loadKernelSpec() - return kernelBTF.Spec, err -} - -func loadKernelSpec() (*Spec, error) { - fh, err := os.Open("/sys/kernel/btf/vmlinux") - if err == nil { - defer fh.Close() - - return loadRawSpec(fh, internal.NativeEndian, nil, nil) - } - - var uname unix.Utsname - if err := unix.Uname(&uname); err != nil { - return nil, fmt.Errorf("uname failed: %w", err) - } - - end := bytes.IndexByte(uname.Release[:], 0) - release := string(uname.Release[:end]) - - // use same list of locations as libbpf - // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 - locations := []string{ - "/boot/vmlinux-%s", - "/lib/modules/%s/vmlinux-%[1]s", - "/lib/modules/%s/build/vmlinux", - "/usr/lib/modules/%s/kernel/vmlinux", - "/usr/lib/debug/boot/vmlinux-%s", - "/usr/lib/debug/boot/vmlinux-%s.debug", - "/usr/lib/debug/lib/modules/%s/vmlinux", - } - - for _, loc := range locations { - path := fmt.Sprintf(loc, release) - - fh, err := os.Open(path) - if err != nil { - continue - } - defer fh.Close() - - file, err := internal.NewSafeELFFile(fh) - if err != nil { - return nil, err - } - defer file.Close() - - return loadSpecFromELF(file, nil) - } - - return nil, fmt.Errorf("no BTF for kernel version %s: %w", release, internal.ErrNotSupported) -} - -// parseBTFHeader parses the header of the .BTF section. -func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { - var header btfHeader - if err := binary.Read(r, bo, &header); err != nil { - return nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - remainder := int64(header.HdrLen) - int64(binary.Size(&header)) - if remainder < 0 { - return nil, errors.New("header length shorter than btfHeader size") - } - - if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { - return nil, fmt.Errorf("header padding: %v", err) - } - - return &header, nil -} - -func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { - for _, bo := range []binary.ByteOrder{ - binary.LittleEndian, - binary.BigEndian, - } { - if _, err := parseBTFHeader(io.NewSectionReader(r, 0, math.MaxInt64), bo); err == nil { - return bo - } - } - - return nil -} - -// parseBTF reads a .BTF section into memory and parses it into a list of -// raw types and a string table. 
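parseBTF splits the raw blob using the fixed 24-byte header described by btfHeader earlier in this file. A minimal sketch that reads just that header from the kernel's exported BTF, assuming a little-endian machine (the real code probes both byte orders):

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

// header mirrors btfHeader above: magic 0xeB9F, version 1, then offsets and
// lengths of the type and string sections relative to HdrLen.
type header struct {
	Magic     uint16
	Version   uint8
	Flags     uint8
	HdrLen    uint32
	TypeOff   uint32
	TypeLen   uint32
	StringOff uint32
	StringLen uint32
}

func main() {
	f, err := os.Open("/sys/kernel/btf/vmlinux")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	var h header
	if err := binary.Read(f, binary.LittleEndian, &h); err != nil {
		fmt.Println("read header:", err)
		return
	}
	fmt.Printf("magic=%#x version=%d type section=%d bytes string section=%d bytes\n",
		h.Magic, h.Version, h.TypeLen, h.StringLen)
}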
-func parseBTF(btf io.Reader, bo binary.ByteOrder) ([]rawType, stringTable, error) { - rawBTF, err := io.ReadAll(btf) - if err != nil { - return nil, nil, fmt.Errorf("can't read BTF: %v", err) - } - rd := bytes.NewReader(rawBTF) - - header, err := parseBTFHeader(rd, bo) - if err != nil { - return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) - } - - buf := io.NewSectionReader(rd, header.stringStart(), int64(header.StringLen)) - rawStrings, err := readStringTable(buf) - if err != nil { - return nil, nil, fmt.Errorf("can't read type names: %w", err) - } - - buf = io.NewSectionReader(rd, header.typeStart(), int64(header.TypeLen)) - rawTypes, err := readTypes(buf, bo) - if err != nil { - return nil, nil, fmt.Errorf("can't read types: %w", err) - } - - return rawTypes, rawStrings, nil -} - -type variable struct { - section string - name string -} - -func fixupDatasec(rawTypes []rawType, rawStrings stringTable, sectionSizes map[string]uint32, variableOffsets map[variable]uint32) error { - for i, rawType := range rawTypes { - if rawType.Kind() != kindDatasec { - continue - } - - name, err := rawStrings.Lookup(rawType.NameOff) - if err != nil { - return err - } - - if name == ".kconfig" || name == ".ksyms" { - return fmt.Errorf("reference to %s: %w", name, ErrNotSupported) - } - - if rawTypes[i].SizeType != 0 { - continue - } - - size, ok := sectionSizes[name] - if !ok { - return fmt.Errorf("data section %s: missing size", name) - } - - rawTypes[i].SizeType = size - - secinfos := rawType.data.([]btfVarSecinfo) - for j, secInfo := range secinfos { - id := int(secInfo.Type - 1) - if id >= len(rawTypes) { - return fmt.Errorf("data section %s: invalid type id %d for variable %d", name, id, j) - } - - varName, err := rawStrings.Lookup(rawTypes[id].NameOff) - if err != nil { - return fmt.Errorf("data section %s: can't get name for type %d: %w", name, id, err) - } - - offset, ok := variableOffsets[variable{name, varName}] - if !ok { - return fmt.Errorf("data section %s: missing offset for variable %s", name, varName) - } - - secinfos[j].Offset = offset - } - } - - return nil -} - -// Copy creates a copy of Spec. -func (s *Spec) Copy() *Spec { - types, _ := copyTypes(s.types, nil) - - namedTypes := make(map[essentialName][]Type) - for _, typ := range types { - if name := typ.TypeName(); name != "" { - en := newEssentialName(name) - namedTypes[en] = append(namedTypes[en], typ) - } - } - - // NB: Other parts of spec are not copied since they are immutable. - return &Spec{ - s.rawTypes, - s.strings, - types, - namedTypes, - s.funcInfos, - s.lineInfos, - s.coreRelos, - s.byteOrder, - } -} - -type marshalOpts struct { - ByteOrder binary.ByteOrder - StripFuncLinkage bool -} - -func (s *Spec) marshal(opts marshalOpts) ([]byte, error) { - var ( - buf bytes.Buffer - header = new(btfHeader) - headerLen = binary.Size(header) - ) - - // Reserve space for the header. We have to write it last since - // we don't know the size of the type section yet. - _, _ = buf.Write(make([]byte, headerLen)) - - // Write type section, just after the header. - for _, raw := range s.rawTypes { - switch { - case opts.StripFuncLinkage && raw.Kind() == kindFunc: - raw.SetLinkage(StaticFunc) - } - - if err := raw.Marshal(&buf, opts.ByteOrder); err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - } - - typeLen := uint32(buf.Len() - headerLen) - - // Write string section after type section. - _, _ = buf.Write(s.strings) - - // Fill out the header, and write it out. 
- header = &btfHeader{ - Magic: btfMagic, - Version: 1, - Flags: 0, - HdrLen: uint32(headerLen), - TypeOff: 0, - TypeLen: typeLen, - StringOff: typeLen, - StringLen: uint32(len(s.strings)), - } - - raw := buf.Bytes() - err := binary.Write(sliceWriter(raw[:headerLen]), opts.ByteOrder, header) - if err != nil { - return nil, fmt.Errorf("can't write header: %v", err) - } - - return raw, nil -} - -type sliceWriter []byte - -func (sw sliceWriter) Write(p []byte) (int, error) { - if len(p) != len(sw) { - return 0, errors.New("size doesn't match") - } - - return copy(sw, p), nil -} - -// Program finds the BTF for a specific function. -// -// Returns an error which may wrap ErrNoExtendedInfo if the Spec doesn't -// contain extended BTF info. -func (s *Spec) Program(name string) (*Program, error) { - if s.funcInfos == nil && s.lineInfos == nil && s.coreRelos == nil { - return nil, fmt.Errorf("BTF for function %s: %w", name, ErrNoExtendedInfo) - } - - funcInfos, funcOK := s.funcInfos[name] - lineInfos, lineOK := s.lineInfos[name] - relos, coreOK := s.coreRelos[name] - - if !funcOK && !lineOK && !coreOK { - return nil, fmt.Errorf("no extended BTF info for function %s", name) - } - - return &Program{s, funcInfos, lineInfos, relos}, nil -} - -// TypeByID returns the BTF Type with the given type ID. -// -// Returns an error wrapping ErrNotFound if a Type with the given ID -// does not exist in the Spec. -func (s *Spec) TypeByID(id TypeID) (Type, error) { - if int(id) > len(s.types) { - return nil, fmt.Errorf("type ID %d: %w", id, ErrNotFound) - } - return s.types[id], nil -} - -// AnyTypesByName returns a list of BTF Types with the given name. -// -// If the BTF blob describes multiple compilation units like vmlinux, multiple -// Types with the same name and kind can exist, but might not describe the same -// data structure. -// -// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. -func (s *Spec) AnyTypesByName(name string) ([]Type, error) { - types := s.namedTypes[newEssentialName(name)] - if len(types) == 0 { - return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) - } - - // Return a copy to prevent changes to namedTypes. - result := make([]Type, 0, len(types)) - for _, t := range types { - // Match against the full name, not just the essential one - // in case the type being looked up is a struct flavor. - if t.TypeName() == name { - result = append(result, t) - } - } - return result, nil -} - -// TypeByName searches for a Type with a specific name. Since multiple -// Types with the same name can exist, the parameter typ is taken to -// narrow down the search in case of a clash. -// -// typ must be a non-nil pointer to an implementation of a Type. -// On success, the address of the found Type will be copied to typ. -// -// Returns an error wrapping ErrNotFound if no matching -// Type exists in the Spec. If multiple candidates are found, -// an error is returned. 
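Since this package lives under internal/, only the library itself (or a test in the same package) can call TypeByName. As a rough sketch of the calling convention described above, written as if it sat inside the package, a lookup of a struct by name could read as follows (the helper name is invented; it assumes *Struct satisfies the package's Type interface, as the other code here does):

// lookupStruct shows the pointer-to-pointer contract of TypeByName: the
// argument must point at a concrete Type implementation, here *Struct.
func lookupStruct(s *Spec, name string) (*Struct, error) {
	var st *Struct
	if err := s.TypeByName(name, &st); err != nil {
		return nil, err
	}
	return st, nil
}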
-func (s *Spec) TypeByName(name string, typ interface{}) error { - typValue := reflect.ValueOf(typ) - if typValue.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", typ) - } - - typPtr := typValue.Elem() - if !typPtr.CanSet() { - return fmt.Errorf("%T cannot be set", typ) - } - - wanted := typPtr.Type() - if !wanted.AssignableTo(reflect.TypeOf((*Type)(nil)).Elem()) { - return fmt.Errorf("%T does not satisfy Type interface", typ) - } - - types, err := s.AnyTypesByName(name) - if err != nil { - return err - } - - var candidate Type - for _, typ := range types { - if reflect.TypeOf(typ) != wanted { - continue - } - - if candidate != nil { - return fmt.Errorf("type %s: multiple candidates for %T", name, typ) - } - - candidate = typ - } - - if candidate == nil { - return fmt.Errorf("type %s: %w", name, ErrNotFound) - } - - typPtr.Set(reflect.ValueOf(candidate)) - - return nil -} - -// Handle is a reference to BTF loaded into the kernel. -type Handle struct { - spec *Spec - fd *sys.FD -} - -// NewHandle loads BTF into the kernel. -// -// Returns ErrNotSupported if BTF is not supported. -func NewHandle(spec *Spec) (*Handle, error) { - if err := haveBTF(); err != nil { - return nil, err - } - - if spec.byteOrder != internal.NativeEndian { - return nil, fmt.Errorf("can't load %s BTF on %s", spec.byteOrder, internal.NativeEndian) - } - - btf, err := spec.marshal(marshalOpts{ - ByteOrder: internal.NativeEndian, - StripFuncLinkage: haveFuncLinkage() != nil, - }) - if err != nil { - return nil, fmt.Errorf("can't marshal BTF: %w", err) - } - - if uint64(len(btf)) > math.MaxUint32 { - return nil, errors.New("BTF exceeds the maximum size") - } - - attr := &sys.BtfLoadAttr{ - Btf: sys.NewSlicePointer(btf), - BtfSize: uint32(len(btf)), - } - - fd, err := sys.BtfLoad(attr) - if err != nil { - logBuf := make([]byte, 64*1024) - attr.BtfLogBuf = sys.NewSlicePointer(logBuf) - attr.BtfLogSize = uint32(len(logBuf)) - attr.BtfLogLevel = 1 - _, logErr := sys.BtfLoad(attr) - return nil, internal.ErrorWithLog(err, logBuf, logErr) - } - - return &Handle{spec.Copy(), fd}, nil -} - -// NewHandleFromID returns the BTF handle for a given id. -// -// Returns ErrNotExist, if there is no BTF with the given id. -// -// Requires CAP_SYS_ADMIN. -func NewHandleFromID(id ID) (*Handle, error) { - fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ - Id: uint32(id), - }) - if err != nil { - return nil, fmt.Errorf("get BTF by id: %w", err) - } - - info, err := newInfoFromFd(fd) - if err != nil { - _ = fd.Close() - return nil, fmt.Errorf("get BTF spec for handle: %w", err) - } - - return &Handle{info.BTF, fd}, nil -} - -// Spec returns the Spec that defined the BTF loaded into the kernel. -func (h *Handle) Spec() *Spec { - return h.spec -} - -// Close destroys the handle. -// -// Subsequent calls to FD will return an invalid value. -func (h *Handle) Close() error { - return h.fd.Close() -} - -// FD returns the file descriptor for the handle. -func (h *Handle) FD() int { - return h.fd.Int() -} - -// Map is the BTF for a map. -type Map struct { - Spec *Spec - Key, Value Type -} - -// Program is the BTF information for a stream of instructions. -type Program struct { - spec *Spec - FuncInfo FuncInfo - LineInfos LineInfos - CoreRelos CoreRelos -} - -// Spec returns the BTF spec of this program. -func (p *Program) Spec() *Spec { - return p.spec -} - -// Fixups returns the changes required to adjust the program to the target. -// -// Passing a nil target will relocate against the running kernel. 
-func (p *Program) Fixups(target *Spec) (COREFixups, error) { - if len(p.CoreRelos) == 0 { - return nil, nil - } - - if target == nil { - var err error - target, err = LoadKernelSpec() - if err != nil { - return nil, err - } - } - - return coreRelocate(p.spec, target, p.CoreRelos) -} - -func marshalBTF(types interface{}, strings []byte, bo binary.ByteOrder) []byte { - const minHeaderLength = 24 - - typesLen := uint32(binary.Size(types)) - header := btfHeader{ - Magic: btfMagic, - Version: 1, - HdrLen: minHeaderLength, - TypeOff: 0, - TypeLen: typesLen, - StringOff: typesLen, - StringLen: uint32(len(strings)), - } - - buf := new(bytes.Buffer) - _ = binary.Write(buf, bo, &header) - _ = binary.Write(buf, bo, types) - buf.Write(strings) - - return buf.Bytes() -} - -var haveBTF = internal.FeatureTest("BTF", "5.1", func() error { - var ( - types struct { - Integer btfType - Var btfType - btfVar struct{ Linkage uint32 } - } - strings = []byte{0, 'a', 0} - ) - - // We use a BTF_KIND_VAR here, to make sure that - // the kernel understands BTF at least as well as we - // do. BTF_KIND_VAR was introduced ~5.1. - types.Integer.SetKind(kindPointer) - types.Var.NameOff = 1 - types.Var.SetKind(kindVar) - types.Var.SizeType = 1 - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ - Btf: sys.NewSlicePointer(btf), - BtfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { - // Treat both EINVAL and EPERM as not supported: loading the program - // might still succeed without BTF. - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) - -var haveFuncLinkage = internal.FeatureTest("BTF func linkage", "5.6", func() error { - if err := haveBTF(); err != nil { - return err - } - - var ( - types struct { - FuncProto btfType - Func btfType - } - strings = []byte{0, 'a', 0} - ) - - types.FuncProto.SetKind(kindFuncProto) - types.Func.SetKind(kindFunc) - types.Func.SizeType = 1 // aka FuncProto - types.Func.NameOff = 1 - types.Func.SetLinkage(GlobalFunc) - - btf := marshalBTF(&types, strings, internal.NativeEndian) - - fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ - Btf: sys.NewSlicePointer(btf), - BtfSize: uint32(len(btf)), - }) - if errors.Is(err, unix.EINVAL) { - return internal.ErrNotSupported - } - if err != nil { - return err - } - - fd.Close() - return nil -}) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go deleted file mode 100644 index d98c73ca5..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf_types.go +++ /dev/null @@ -1,287 +0,0 @@ -package btf - -import ( - "encoding/binary" - "fmt" - "io" -) - -//go:generate stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage - -// btfKind describes a Type. -type btfKind uint8 - -// Equivalents of the BTF_KIND_* constants. -const ( - kindUnknown btfKind = iota - kindInt - kindPointer - kindArray - kindStruct - kindUnion - kindEnum - kindForward - kindTypedef - kindVolatile - kindConst - kindRestrict - // Added ~4.20 - kindFunc - kindFuncProto - // Added ~5.1 - kindVar - kindDatasec - // Added ~5.13 - kindFloat -) - -// FuncLinkage describes BTF function linkage metadata. -type FuncLinkage int - -// Equivalent of enum btf_func_linkage. 
-const ( - StaticFunc FuncLinkage = iota // static - GlobalFunc // global - ExternFunc // extern -) - -// VarLinkage describes BTF variable linkage metadata. -type VarLinkage int - -const ( - StaticVar VarLinkage = iota // static - GlobalVar // global - ExternVar // extern -) - -const ( - btfTypeKindShift = 24 - btfTypeKindLen = 5 - btfTypeVlenShift = 0 - btfTypeVlenMask = 16 - btfTypeKindFlagShift = 31 - btfTypeKindFlagMask = 1 -) - -// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. -type btfType struct { - NameOff uint32 - /* "info" bits arrangement - * bits 0-15: vlen (e.g. # of struct's members), linkage - * bits 16-23: unused - * bits 24-28: kind (e.g. int, ptr, array...etc) - * bits 29-30: unused - * bit 31: kind_flag, currently used by - * struct, union and fwd - */ - Info uint32 - /* "size" is used by INT, ENUM, STRUCT and UNION. - * "size" tells the size of the type it is describing. - * - * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, - * FUNC and FUNC_PROTO. - * "type" is a type_id referring to another type. - */ - SizeType uint32 -} - -func (k btfKind) String() string { - switch k { - case kindUnknown: - return "Unknown" - case kindInt: - return "Integer" - case kindPointer: - return "Pointer" - case kindArray: - return "Array" - case kindStruct: - return "Struct" - case kindUnion: - return "Union" - case kindEnum: - return "Enumeration" - case kindForward: - return "Forward" - case kindTypedef: - return "Typedef" - case kindVolatile: - return "Volatile" - case kindConst: - return "Const" - case kindRestrict: - return "Restrict" - case kindFunc: - return "Function" - case kindFuncProto: - return "Function Proto" - case kindVar: - return "Variable" - case kindDatasec: - return "Section" - case kindFloat: - return "Float" - default: - return fmt.Sprintf("Unknown (%d)", k) - } -} - -func mask(len uint32) uint32 { - return (1 << len) - 1 -} - -func (bt *btfType) info(len, shift uint32) uint32 { - return (bt.Info >> shift) & mask(len) -} - -func (bt *btfType) setInfo(value, len, shift uint32) { - bt.Info &^= mask(len) << shift - bt.Info |= (value & mask(len)) << shift -} - -func (bt *btfType) Kind() btfKind { - return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) -} - -func (bt *btfType) SetKind(kind btfKind) { - bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) -} - -func (bt *btfType) Vlen() int { - return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) -} - -func (bt *btfType) SetVlen(vlen int) { - bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) -} - -func (bt *btfType) KindFlag() bool { - return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 -} - -func (bt *btfType) Linkage() FuncLinkage { - return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) -} - -func (bt *btfType) SetLinkage(linkage FuncLinkage) { - bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) -} - -func (bt *btfType) Type() TypeID { - // TODO: Panic here if wrong kind? - return TypeID(bt.SizeType) -} - -func (bt *btfType) Size() uint32 { - // TODO: Panic here if wrong kind? 
- return bt.SizeType -} - -type rawType struct { - btfType - data interface{} -} - -func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { - if err := binary.Write(w, bo, &rt.btfType); err != nil { - return err - } - - if rt.data == nil { - return nil - } - - return binary.Write(w, bo, rt.data) -} - -type btfArray struct { - Type TypeID - IndexType TypeID - Nelems uint32 -} - -type btfMember struct { - NameOff uint32 - Type TypeID - Offset uint32 -} - -type btfVarSecinfo struct { - Type TypeID - Offset uint32 - Size uint32 -} - -type btfVariable struct { - Linkage uint32 -} - -type btfEnum struct { - NameOff uint32 - Val int32 -} - -type btfParam struct { - NameOff uint32 - Type TypeID -} - -func readTypes(r io.Reader, bo binary.ByteOrder) ([]rawType, error) { - var ( - header btfType - types []rawType - ) - - for id := TypeID(1); ; id++ { - if err := binary.Read(r, bo, &header); err == io.EOF { - return types, nil - } else if err != nil { - return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) - } - - var data interface{} - switch header.Kind() { - case kindInt: - data = new(uint32) - case kindPointer: - case kindArray: - data = new(btfArray) - case kindStruct: - fallthrough - case kindUnion: - data = make([]btfMember, header.Vlen()) - case kindEnum: - data = make([]btfEnum, header.Vlen()) - case kindForward: - case kindTypedef: - case kindVolatile: - case kindConst: - case kindRestrict: - case kindFunc: - case kindFuncProto: - data = make([]btfParam, header.Vlen()) - case kindVar: - data = new(btfVariable) - case kindDatasec: - data = make([]btfVarSecinfo, header.Vlen()) - case kindFloat: - default: - return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind()) - } - - if data == nil { - types = append(types, rawType{header, nil}) - continue - } - - if err := binary.Read(r, bo, data); err != nil { - return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err) - } - - types = append(types, rawType{header, data}) - } -} - -func intEncoding(raw uint32) (IntEncoding, uint32, byte) { - return IntEncoding((raw & 0x0f000000) >> 24), (raw & 0x00ff0000) >> 16, byte(raw & 0x000000ff) -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go deleted file mode 100644 index 0e0c17d68..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/btf_types_string.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage"; DO NOT EDIT. - -package btf - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[StaticFunc-0] - _ = x[GlobalFunc-1] - _ = x[ExternFunc-2] -} - -const _FuncLinkage_name = "staticglobalextern" - -var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} - -func (i FuncLinkage) String() string { - if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { - return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[StaticVar-0] - _ = x[GlobalVar-1] - _ = x[ExternVar-2] -} - -const _VarLinkage_name = "staticglobalextern" - -var _VarLinkage_index = [...]uint8{0, 6, 12, 18} - -func (i VarLinkage) String() string { - if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { - return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/core.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/core.go deleted file mode 100644 index 95908308a..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/core.go +++ /dev/null @@ -1,901 +0,0 @@ -package btf - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/cilium/ebpf/asm" -) - -// Code in this file is derived from libbpf, which is available under a BSD -// 2-Clause license. - -// COREFixup is the result of computing a CO-RE relocation for a target. -type COREFixup struct { - Kind COREKind - Local uint32 - Target uint32 - Poison bool -} - -func (f COREFixup) equal(other COREFixup) bool { - return f.Local == other.Local && f.Target == other.Target -} - -func (f COREFixup) String() string { - if f.Poison { - return fmt.Sprintf("%s=poison", f.Kind) - } - return fmt.Sprintf("%s=%d->%d", f.Kind, f.Local, f.Target) -} - -func (f COREFixup) apply(ins *asm.Instruction) error { - if f.Poison { - return errors.New("can't poison individual instruction") - } - - switch class := ins.OpCode.Class(); class { - case asm.LdXClass, asm.StClass, asm.StXClass: - if want := int16(f.Local); want != ins.Offset { - return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, want) - } - - if f.Target > math.MaxInt16 { - return fmt.Errorf("offset %d exceeds MaxInt16", f.Target) - } - - ins.Offset = int16(f.Target) - - case asm.LdClass: - if !ins.IsConstantLoad(asm.DWord) { - return fmt.Errorf("not a dword-sized immediate load") - } - - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) - } - - ins.Constant = int64(f.Target) - - case asm.ALUClass: - if ins.OpCode.ALUOp() == asm.Swap { - return fmt.Errorf("relocation against swap") - } - - fallthrough - - case asm.ALU64Class: - if src := ins.OpCode.Source(); src != asm.ImmSource { - return fmt.Errorf("invalid source %s", src) - } - - if want := int64(f.Local); want != ins.Constant { - return fmt.Errorf("invalid immediate %d, expected %d", ins.Constant, want) - } - - if f.Target > math.MaxInt32 { - return fmt.Errorf("immediate %d exceeds MaxInt32", f.Target) - } - - ins.Constant = int64(f.Target) - - default: - return fmt.Errorf("invalid class %s", class) - } - - return nil -} - -func (f COREFixup) isNonExistant() bool { - return f.Kind.checksForExistence() && f.Target == 0 -} - -type COREFixups map[uint64]COREFixup - -// Apply returns a copy of insns with CO-RE relocations applied. 
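Within this removed internal package, the relocation flow documented above is: Spec.Program yields the per-function ext-info, Program.Fixups computes COREFixups against a target (nil meaning the running kernel), and Apply rewrites the instruction stream. A hedged sketch of that flow as it would sit next to this code; the helper name is invented, and asm refers to the github.com/cilium/ebpf/asm package already imported by this file:

// relocateForKernel applies CO-RE fixups for the running kernel to a copy of
// the given instruction stream.
func relocateForKernel(p *Program, insns asm.Instructions) (asm.Instructions, error) {
	fixups, err := p.Fixups(nil) // nil target: relocate against LoadKernelSpec()
	if err != nil {
		return nil, err
	}
	return fixups.Apply(insns)
}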
-func (fs COREFixups) Apply(insns asm.Instructions) (asm.Instructions, error) { - if len(fs) == 0 { - cpy := make(asm.Instructions, len(insns)) - copy(cpy, insns) - return insns, nil - } - - cpy := make(asm.Instructions, 0, len(insns)) - iter := insns.Iterate() - for iter.Next() { - fixup, ok := fs[iter.Offset.Bytes()] - if !ok { - cpy = append(cpy, *iter.Ins) - continue - } - - ins := *iter.Ins - if fixup.Poison { - const badRelo = asm.BuiltinFunc(0xbad2310) - - cpy = append(cpy, badRelo.Call()) - if ins.OpCode.IsDWordLoad() { - // 64 bit constant loads occupy two raw bpf instructions, so - // we need to add another instruction as padding. - cpy = append(cpy, badRelo.Call()) - } - - continue - } - - if err := fixup.apply(&ins); err != nil { - return nil, fmt.Errorf("instruction %d, offset %d: %s: %w", iter.Index, iter.Offset.Bytes(), fixup.Kind, err) - } - - cpy = append(cpy, ins) - } - - return cpy, nil -} - -// COREKind is the type of CO-RE relocation -type COREKind uint32 - -const ( - reloFieldByteOffset COREKind = iota /* field byte offset */ - reloFieldByteSize /* field size in bytes */ - reloFieldExists /* field existence in target kernel */ - reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ - reloFieldLShiftU64 /* bitfield-specific left bitshift */ - reloFieldRShiftU64 /* bitfield-specific right bitshift */ - reloTypeIDLocal /* type ID in local BPF object */ - reloTypeIDTarget /* type ID in target kernel */ - reloTypeExists /* type existence in target kernel */ - reloTypeSize /* type size in bytes */ - reloEnumvalExists /* enum value existence in target kernel */ - reloEnumvalValue /* enum value integer value */ -) - -func (k COREKind) String() string { - switch k { - case reloFieldByteOffset: - return "byte_off" - case reloFieldByteSize: - return "byte_sz" - case reloFieldExists: - return "field_exists" - case reloFieldSigned: - return "signed" - case reloFieldLShiftU64: - return "lshift_u64" - case reloFieldRShiftU64: - return "rshift_u64" - case reloTypeIDLocal: - return "local_type_id" - case reloTypeIDTarget: - return "target_type_id" - case reloTypeExists: - return "type_exists" - case reloTypeSize: - return "type_size" - case reloEnumvalExists: - return "enumval_exists" - case reloEnumvalValue: - return "enumval_value" - default: - return "unknown" - } -} - -func (k COREKind) checksForExistence() bool { - return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists -} - -func coreRelocate(local, target *Spec, relos CoreRelos) (COREFixups, error) { - if local.byteOrder != target.byteOrder { - return nil, fmt.Errorf("can't relocate %s against %s", local.byteOrder, target.byteOrder) - } - - var ids []TypeID - relosByID := make(map[TypeID]CoreRelos) - result := make(COREFixups, len(relos)) - for _, relo := range relos { - if relo.kind == reloTypeIDLocal { - // Filtering out reloTypeIDLocal here makes our lives a lot easier - // down the line, since it doesn't have a target at all. - if len(relo.accessor) > 1 || relo.accessor[0] != 0 { - return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) - } - - result[uint64(relo.insnOff)] = COREFixup{ - relo.kind, - uint32(relo.typeID), - uint32(relo.typeID), - false, - } - continue - } - - relos, ok := relosByID[relo.typeID] - if !ok { - ids = append(ids, relo.typeID) - } - relosByID[relo.typeID] = append(relos, relo) - } - - // Ensure we work on relocations in a deterministic order. 
- sort.Slice(ids, func(i, j int) bool { - return ids[i] < ids[j] - }) - - for _, id := range ids { - if int(id) >= len(local.types) { - return nil, fmt.Errorf("invalid type id %d", id) - } - - localType := local.types[id] - localTypeName := localType.TypeName() - if localTypeName == "" { - return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) - } - - relos := relosByID[id] - targets := target.namedTypes[newEssentialName(localTypeName)] - fixups, err := coreCalculateFixups(localType, targets, relos) - if err != nil { - return nil, fmt.Errorf("relocate %s: %w", localType, err) - } - - for i, relo := range relos { - result[uint64(relo.insnOff)] = fixups[i] - } - } - - return result, nil -} - -var errAmbiguousRelocation = errors.New("ambiguous relocation") -var errImpossibleRelocation = errors.New("impossible relocation") - -// coreCalculateFixups calculates the fixups for the given relocations using -// the "best" target. -// -// The best target is determined by scoring: the less poisoning we have to do -// the better the target is. -func coreCalculateFixups(local Type, targets []Type, relos CoreRelos) ([]COREFixup, error) { - localID := local.ID() - local, err := copyType(local, skipQualifiersAndTypedefs) - if err != nil { - return nil, err - } - - bestScore := len(relos) - var bestFixups []COREFixup - for i := range targets { - targetID := targets[i].ID() - target, err := copyType(targets[i], skipQualifiersAndTypedefs) - if err != nil { - return nil, err - } - - score := 0 // lower is better - fixups := make([]COREFixup, 0, len(relos)) - for _, relo := range relos { - fixup, err := coreCalculateFixup(local, localID, target, targetID, relo) - if err != nil { - return nil, fmt.Errorf("target %s: %w", target, err) - } - if fixup.Poison || fixup.isNonExistant() { - score++ - } - fixups = append(fixups, fixup) - } - - if score > bestScore { - // We have a better target already, ignore this one. - continue - } - - if score < bestScore { - // This is the best target yet, use it. - bestScore = score - bestFixups = fixups - continue - } - - // Some other target has the same score as the current one. Make sure - // the fixups agree with each other. - for i, fixup := range bestFixups { - if !fixup.equal(fixups[i]) { - return nil, fmt.Errorf("%s: multiple types match: %w", fixup.Kind, errAmbiguousRelocation) - } - } - } - - if bestFixups == nil { - // Nothing at all matched, probably because there are no suitable - // targets at all. Poison everything! - bestFixups = make([]COREFixup, len(relos)) - for i, relo := range relos { - bestFixups[i] = COREFixup{Kind: relo.kind, Poison: true} - } - } - - return bestFixups, nil -} - -// coreCalculateFixup calculates the fixup for a single local type, target type -// and relocation. 
-func coreCalculateFixup(local Type, localID TypeID, target Type, targetID TypeID, relo CoreRelo) (COREFixup, error) { - fixup := func(local, target uint32) (COREFixup, error) { - return COREFixup{relo.kind, local, target, false}, nil - } - poison := func() (COREFixup, error) { - if relo.kind.checksForExistence() { - return fixup(1, 0) - } - return COREFixup{relo.kind, 0, 0, true}, nil - } - zero := COREFixup{} - - switch relo.kind { - case reloTypeIDTarget, reloTypeSize, reloTypeExists: - if len(relo.accessor) > 1 || relo.accessor[0] != 0 { - return zero, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) - } - - err := coreAreTypesCompatible(local, target) - if errors.Is(err, errImpossibleRelocation) { - return poison() - } - if err != nil { - return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) - } - - switch relo.kind { - case reloTypeExists: - return fixup(1, 1) - - case reloTypeIDTarget: - return fixup(uint32(localID), uint32(targetID)) - - case reloTypeSize: - localSize, err := Sizeof(local) - if err != nil { - return zero, err - } - - targetSize, err := Sizeof(target) - if err != nil { - return zero, err - } - - return fixup(uint32(localSize), uint32(targetSize)) - } - - case reloEnumvalValue, reloEnumvalExists: - localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) - if errors.Is(err, errImpossibleRelocation) { - return poison() - } - if err != nil { - return zero, fmt.Errorf("relocation %s: %w", relo.kind, err) - } - - switch relo.kind { - case reloEnumvalExists: - return fixup(1, 1) - - case reloEnumvalValue: - return fixup(uint32(localValue.Value), uint32(targetValue.Value)) - } - - case reloFieldByteOffset, reloFieldByteSize, reloFieldExists: - if _, ok := target.(*Fwd); ok { - // We can't relocate fields using a forward declaration, so - // skip it. If a non-forward declaration is present in the BTF - // we'll find it in one of the other iterations. - return poison() - } - - localField, targetField, err := coreFindField(local, relo.accessor, target) - if errors.Is(err, errImpossibleRelocation) { - return poison() - } - if err != nil { - return zero, fmt.Errorf("target %s: %w", target, err) - } - - switch relo.kind { - case reloFieldExists: - return fixup(1, 1) - - case reloFieldByteOffset: - return fixup(localField.offset/8, targetField.offset/8) - - case reloFieldByteSize: - localSize, err := Sizeof(localField.Type) - if err != nil { - return zero, err - } - - targetSize, err := Sizeof(targetField.Type) - if err != nil { - return zero, err - } - - return fixup(uint32(localSize), uint32(targetSize)) - - } - } - - return zero, fmt.Errorf("relocation %s: %w", relo.kind, ErrNotSupported) -} - -/* coreAccessor contains a path through a struct. It contains at least one index. - * - * The interpretation depends on the kind of the relocation. The following is - * taken from struct bpf_core_relo in libbpf_internal.h: - * - * - for field-based relocations, string encodes an accessed field using - * a sequence of field and array indices, separated by colon (:). It's - * conceptually very close to LLVM's getelementptr ([0]) instruction's - * arguments for identifying offset to a field. - * - for type-based relocations, strings is expected to be just "0"; - * - for enum value-based relocations, string contains an index of enum - * value within its enum type; - * - * Example to provide a better feel. 
- * - * struct sample { - * int a; - * struct { - * int b[10]; - * }; - * }; - * - * struct sample s = ...; - * int x = &s->a; // encoded as "0:0" (a is field #0) - * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, - * // b is field #0 inside anon struct, accessing elem #5) - * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) - */ -type coreAccessor []int - -func parseCoreAccessor(accessor string) (coreAccessor, error) { - if accessor == "" { - return nil, fmt.Errorf("empty accessor") - } - - parts := strings.Split(accessor, ":") - result := make(coreAccessor, 0, len(parts)) - for _, part := range parts { - // 31 bits to avoid overflowing int on 32 bit platforms. - index, err := strconv.ParseUint(part, 10, 31) - if err != nil { - return nil, fmt.Errorf("accessor index %q: %s", part, err) - } - - result = append(result, int(index)) - } - - return result, nil -} - -func (ca coreAccessor) String() string { - strs := make([]string, 0, len(ca)) - for _, i := range ca { - strs = append(strs, strconv.Itoa(i)) - } - return strings.Join(strs, ":") -} - -func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { - e, ok := t.(*Enum) - if !ok { - return nil, fmt.Errorf("not an enum: %s", t) - } - - if len(ca) > 1 { - return nil, fmt.Errorf("invalid accessor %s for enum", ca) - } - - i := ca[0] - if i >= len(e.Values) { - return nil, fmt.Errorf("invalid index %d for %s", i, e) - } - - return &e.Values[i], nil -} - -type coreField struct { - Type Type - offset uint32 -} - -func adjustOffset(base uint32, t Type, n int) (uint32, error) { - size, err := Sizeof(t) - if err != nil { - return 0, err - } - - return base + (uint32(n) * uint32(size) * 8), nil -} - -// coreFindField descends into the local type using the accessor and tries to -// find an equivalent field in target at each step. -// -// Returns the field and the offset of the field from the start of -// target in bits. -func coreFindField(local Type, localAcc coreAccessor, target Type) (_, _ coreField, _ error) { - // The first index is used to offset a pointer of the base type like - // when accessing an array. - localOffset, err := adjustOffset(0, local, localAcc[0]) - if err != nil { - return coreField{}, coreField{}, err - } - - targetOffset, err := adjustOffset(0, target, localAcc[0]) - if err != nil { - return coreField{}, coreField{}, err - } - - if err := coreAreMembersCompatible(local, target); err != nil { - return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) - } - - var localMaybeFlex, targetMaybeFlex bool - for _, acc := range localAcc[1:] { - switch localType := local.(type) { - case composite: - // For composite types acc is used to find the field in the local type, - // and then we try to find a field in target with the same name. - localMembers := localType.members() - if acc >= len(localMembers) { - return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, local) - } - - localMember := localMembers[acc] - if localMember.Name == "" { - _, ok := localMember.Type.(composite) - if !ok { - return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) - } - - // This is an anonymous struct or union, ignore it. 
- local = localMember.Type - localOffset += localMember.OffsetBits - localMaybeFlex = false - continue - } - - targetType, ok := target.(composite) - if !ok { - return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) - } - - targetMember, last, err := coreFindMember(targetType, localMember.Name) - if err != nil { - return coreField{}, coreField{}, err - } - - if targetMember.BitfieldSize > 0 { - return coreField{}, coreField{}, fmt.Errorf("field %q is a bitfield: %w", targetMember.Name, ErrNotSupported) - } - - local = localMember.Type - localMaybeFlex = acc == len(localMembers)-1 - localOffset += localMember.OffsetBits - target = targetMember.Type - targetMaybeFlex = last - targetOffset += targetMember.OffsetBits - - case *Array: - // For arrays, acc is the index in the target. - targetType, ok := target.(*Array) - if !ok { - return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) - } - - if localType.Nelems == 0 && !localMaybeFlex { - return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") - } - if targetType.Nelems == 0 && !targetMaybeFlex { - return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") - } - - if localType.Nelems > 0 && acc >= int(localType.Nelems) { - return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) - } - if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { - return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) - } - - local = localType.Type - localMaybeFlex = false - localOffset, err = adjustOffset(localOffset, local, acc) - if err != nil { - return coreField{}, coreField{}, err - } - - target = targetType.Type - targetMaybeFlex = false - targetOffset, err = adjustOffset(targetOffset, target, acc) - if err != nil { - return coreField{}, coreField{}, err - } - - default: - return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) - } - - if err := coreAreMembersCompatible(local, target); err != nil { - return coreField{}, coreField{}, err - } - } - - return coreField{local, localOffset}, coreField{target, targetOffset}, nil -} - -// coreFindMember finds a member in a composite type while handling anonymous -// structs and unions. -func coreFindMember(typ composite, name string) (Member, bool, error) { - if name == "" { - return Member{}, false, errors.New("can't search for anonymous member") - } - - type offsetTarget struct { - composite - offset uint32 - } - - targets := []offsetTarget{{typ, 0}} - visited := make(map[composite]bool) - - for i := 0; i < len(targets); i++ { - target := targets[i] - - // Only visit targets once to prevent infinite recursion. - if visited[target] { - continue - } - if len(visited) >= maxTypeDepth { - // This check is different than libbpf, which restricts the entire - // path to BPF_CORE_SPEC_MAX_LEN items. - return Member{}, false, fmt.Errorf("type is nested too deep") - } - visited[target] = true - - members := target.members() - for j, member := range members { - if member.Name == name { - // NB: This is safe because member is a copy. - member.OffsetBits += target.offset - return member, j == len(members)-1, nil - } - - // The names don't match, but this member could be an anonymous struct - // or union. 
- if member.Name != "" { - continue - } - - comp, ok := member.Type.(composite) - if !ok { - return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) - } - - targets = append(targets, offsetTarget{comp, target.offset + member.OffsetBits}) - } - } - - return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) -} - -// coreFindEnumValue follows localAcc to find the equivalent enum value in target. -func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { - localValue, err := localAcc.enumValue(local) - if err != nil { - return nil, nil, err - } - - targetEnum, ok := target.(*Enum) - if !ok { - return nil, nil, errImpossibleRelocation - } - - localName := newEssentialName(localValue.Name) - for i, targetValue := range targetEnum.Values { - if newEssentialName(targetValue.Name) != localName { - continue - } - - return localValue, &targetEnum.Values[i], nil - } - - return nil, nil, errImpossibleRelocation -} - -/* The comment below is from bpf_core_types_are_compat in libbpf.c: - * - * Check local and target types for compatibility. This check is used for - * type-based CO-RE relocations and follow slightly different rules than - * field-based relocations. This function assumes that root types were already - * checked for name match. Beyond that initial root-level name check, names - * are completely ignored. Compatibility rules are as follows: - * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but - * kind should match for local and target types (i.e., STRUCT is not - * compatible with UNION); - * - for ENUMs, the size is ignored; - * - for INT, size and signedness are ignored; - * - for ARRAY, dimensionality is ignored, element types are checked for - * compatibility recursively; - * - CONST/VOLATILE/RESTRICT modifiers are ignored; - * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; - * - FUNC_PROTOs are compatible if they have compatible signature: same - * number of input args and compatible return and argument types. - * These rules are not set in stone and probably will be adjusted as we get - * more experience with using BPF CO-RE relocations. - * - * Returns errImpossibleRelocation if types are not compatible. 
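coreFindEnumValue matches enum values by their essential name, i.e. with any ___flavor suffix stripped, mirroring newEssentialName further down in this file. A minimal sketch of that comparison; the value names are made up for illustration:

package main

import (
	"fmt"
	"strings"
)

// essentialName strips a ___flavor suffix, mirroring newEssentialName below.
func essentialName(name string) string {
	if i := strings.LastIndex(name, "___"); i > 0 {
		return name[:i]
	}
	return name
}

func main() {
	local := "SOME_ENUM_VALUE___v2" // hypothetical flavored name
	target := "SOME_ENUM_VALUE"
	fmt.Println(essentialName(local) == essentialName(target)) // true
}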
- */ -func coreAreTypesCompatible(localType Type, targetType Type) error { - var ( - localTs, targetTs typeDeque - l, t = &localType, &targetType - depth = 0 - ) - - for ; l != nil && t != nil; l, t = localTs.shift(), targetTs.shift() { - if depth >= maxTypeDepth { - return errors.New("types are nested too deep") - } - - localType = *l - targetType = *t - - if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { - return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) - } - - switch lv := (localType).(type) { - case *Void, *Struct, *Union, *Enum, *Fwd: - // Nothing to do here - - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - - case *Pointer, *Array: - depth++ - localType.walk(&localTs) - targetType.walk(&targetTs) - - case *FuncProto: - tv := targetType.(*FuncProto) - if len(lv.Params) != len(tv.Params) { - return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation) - } - - depth++ - localType.walk(&localTs) - targetType.walk(&targetTs) - - default: - return fmt.Errorf("unsupported type %T", localType) - } - } - - if l != nil { - return fmt.Errorf("dangling local type %T", *l) - } - - if t != nil { - return fmt.Errorf("dangling target type %T", *t) - } - - return nil -} - -/* coreAreMembersCompatible checks two types for field-based relocation compatibility. - * - * The comment below is from bpf_core_fields_are_compat in libbpf.c: - * - * Check two types for compatibility for the purpose of field access - * relocation. const/volatile/restrict and typedefs are skipped to ensure we - * are relocating semantically compatible entities: - * - any two STRUCTs/UNIONs are compatible and can be mixed; - * - any two FWDs are compatible, if their names match (modulo flavor suffix); - * - any two PTRs are always compatible; - * - for ENUMs, names should be the same (ignoring flavor suffix) or at - * least one of enums should be anonymous; - * - for ENUMs, check sizes, names are ignored; - * - for INT, size and signedness are ignored; - * - any two FLOATs are always compatible; - * - for ARRAY, dimensionality is ignored, element types are checked for - * compatibility recursively; - * [ NB: coreAreMembersCompatible doesn't recurse, this check is done - * by coreFindField. ] - * - everything else shouldn't be ever a target of relocation. - * These rules are not set in stone and probably will be adjusted as we get - * more experience with using BPF CO-RE relocations. - * - * Returns errImpossibleRelocation if the members are not compatible. 
- */ -func coreAreMembersCompatible(localType Type, targetType Type) error { - doNamesMatch := func(a, b string) error { - if a == "" || b == "" { - // allow anonymous and named type to match - return nil - } - - if newEssentialName(a) == newEssentialName(b) { - return nil - } - - return fmt.Errorf("names don't match: %w", errImpossibleRelocation) - } - - _, lok := localType.(composite) - _, tok := targetType.(composite) - if lok && tok { - return nil - } - - if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { - return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) - } - - switch lv := localType.(type) { - case *Array, *Pointer, *Float: - return nil - - case *Enum: - tv := targetType.(*Enum) - return doNamesMatch(lv.Name, tv.Name) - - case *Fwd: - tv := targetType.(*Fwd) - return doNamesMatch(lv.Name, tv.Name) - - case *Int: - tv := targetType.(*Int) - if lv.isBitfield() || tv.isBitfield() { - return fmt.Errorf("bitfield: %w", errImpossibleRelocation) - } - return nil - - default: - return fmt.Errorf("type %s: %w", localType, ErrNotSupported) - } -} - -func skipQualifiersAndTypedefs(typ Type) (Type, error) { - result := typ - for depth := 0; depth <= maxTypeDepth; depth++ { - switch v := (result).(type) { - case qualifier: - result = v.qualify() - case *Typedef: - result = v.Type - default: - return result, nil - } - } - return nil, errors.New("exceeded type depth") -} - -func skipQualifiers(typ Type) (Type, error) { - result := typ - for depth := 0; depth <= maxTypeDepth; depth++ { - switch v := (result).(type) { - case qualifier: - result = v.qualify() - default: - return result, nil - } - } - return nil, errors.New("exceeded type depth") -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go deleted file mode 100644 index c4da1e489..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/ext_info.go +++ /dev/null @@ -1,497 +0,0 @@ -package btf - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math" - - "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal" -) - -// extInfo contains extended program metadata. -// -// It is indexed per section. -type extInfo struct { - funcInfos map[string]FuncInfos - lineInfos map[string]LineInfos - relos map[string]CoreRelos -} - -// loadExtInfos parses the .BTF.ext section into its constituent parts. -func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, strings stringTable) (*extInfo, error) { - // Open unbuffered section reader. binary.Read() calls io.ReadFull on - // the header structs, resulting in one syscall per header. 
- headerRd := io.NewSectionReader(r, 0, math.MaxInt64) - extHeader, err := parseBTFExtHeader(headerRd, bo) - if err != nil { - return nil, fmt.Errorf("parsing BTF extension header: %w", err) - } - - coreHeader, err := parseBTFExtCoreHeader(headerRd, bo, extHeader) - if err != nil { - return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) - } - - buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) - funcInfos, err := parseFuncInfos(buf, bo, strings) - if err != nil { - return nil, fmt.Errorf("parsing BTF function info: %w", err) - } - - buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) - lineInfos, err := parseLineInfos(buf, bo, strings) - if err != nil { - return nil, fmt.Errorf("parsing BTF line info: %w", err) - } - - relos := make(map[string]CoreRelos) - if coreHeader != nil && coreHeader.CoreReloOff > 0 && coreHeader.CoreReloLen > 0 { - buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.CoreReloLen)) - relos, err = parseCoreRelos(buf, bo, strings) - if err != nil { - return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) - } - } - - return &extInfo{funcInfos, lineInfos, relos}, nil -} - -// btfExtHeader is found at the start of the .BTF.ext section. -type btfExtHeader struct { - Magic uint16 - Version uint8 - Flags uint8 - - // HdrLen is larger than the size of struct btfExtHeader when it is - // immediately followed by a btfExtCoreHeader. - HdrLen uint32 - - FuncInfoOff uint32 - FuncInfoLen uint32 - LineInfoOff uint32 - LineInfoLen uint32 -} - -// parseBTFExtHeader parses the header of the .BTF.ext section. -func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { - var header btfExtHeader - if err := binary.Read(r, bo, &header); err != nil { - return nil, fmt.Errorf("can't read header: %v", err) - } - - if header.Magic != btfMagic { - return nil, fmt.Errorf("incorrect magic value %v", header.Magic) - } - - if header.Version != 1 { - return nil, fmt.Errorf("unexpected version %v", header.Version) - } - - if header.Flags != 0 { - return nil, fmt.Errorf("unsupported flags %v", header.Flags) - } - - if int64(header.HdrLen) < int64(binary.Size(&header)) { - return nil, fmt.Errorf("header length shorter than btfExtHeader size") - } - - return &header, nil -} - -// funcInfoStart returns the offset from the beginning of the .BTF.ext section -// to the start of its func_info entries. -func (h *btfExtHeader) funcInfoStart() int64 { - return int64(h.HdrLen + h.FuncInfoOff) -} - -// lineInfoStart returns the offset from the beginning of the .BTF.ext section -// to the start of its line_info entries. -func (h *btfExtHeader) lineInfoStart() int64 { - return int64(h.HdrLen + h.LineInfoOff) -} - -// coreReloStart returns the offset from the beginning of the .BTF.ext section -// to the start of its CO-RE relocation entries. -func (h *btfExtHeader) coreReloStart(ch *btfExtCoreHeader) int64 { - return int64(h.HdrLen + ch.CoreReloOff) -} - -// btfExtCoreHeader is found right after the btfExtHeader when its HdrLen -// field is larger than its size. -type btfExtCoreHeader struct { - CoreReloOff uint32 - CoreReloLen uint32 -} - -// parseBTFExtCoreHeader parses the tail of the .BTF.ext header. If additional -// header bytes are present, extHeader.HdrLen will be larger than the struct, -// indicating the presence of a CO-RE extension header. 
-func parseBTFExtCoreHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCoreHeader, error) { - extHdrSize := int64(binary.Size(&extHeader)) - remainder := int64(extHeader.HdrLen) - extHdrSize - - if remainder == 0 { - return nil, nil - } - - var coreHeader btfExtCoreHeader - if err := binary.Read(r, bo, &coreHeader); err != nil { - return nil, fmt.Errorf("can't read header: %v", err) - } - - return &coreHeader, nil -} - -type btfExtInfoSec struct { - SecNameOff uint32 - NumInfo uint32 -} - -// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, -// appearing within func_info and line_info sub-sections. -// These headers appear once for each program section in the ELF and are -// followed by one or more func/line_info records for the section. -func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings stringTable) (string, *btfExtInfoSec, error) { - var infoHeader btfExtInfoSec - if err := binary.Read(r, bo, &infoHeader); err != nil { - return "", nil, fmt.Errorf("read ext info header: %w", err) - } - - secName, err := strings.Lookup(infoHeader.SecNameOff) - if err != nil { - return "", nil, fmt.Errorf("get section name: %w", err) - } - if secName == "" { - return "", nil, fmt.Errorf("extinfo header refers to empty section name") - } - - if infoHeader.NumInfo == 0 { - return "", nil, fmt.Errorf("section %s has zero records", secName) - } - - return secName, &infoHeader, nil -} - -// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos -// or line_infos segment that describes the length of all extInfoRecords in -// that segment. -func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { - const maxRecordSize = 256 - - var recordSize uint32 - if err := binary.Read(r, bo, &recordSize); err != nil { - return 0, fmt.Errorf("can't read record size: %v", err) - } - - if recordSize < 4 { - // Need at least InsnOff worth of bytes per record. - return 0, errors.New("record size too short") - } - if recordSize > maxRecordSize { - return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) - } - - return recordSize, nil -} - -// FuncInfo represents the location and type ID of a function in a BPF ELF. -type FuncInfo struct { - // Instruction offset of the function within an ELF section. - // Always zero after parsing a funcinfo from an ELF, instruction streams - // are split on function boundaries. - InsnOff uint32 - TypeID TypeID -} - -// Name looks up the FuncInfo's corresponding function name in the given spec. -func (fi FuncInfo) Name(spec *Spec) (string, error) { - // Look up function name based on type ID. - typ, err := spec.TypeByID(fi.TypeID) - if err != nil { - return "", fmt.Errorf("looking up type by ID: %w", err) - } - if _, ok := typ.(*Func); !ok { - return "", fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) - } - - // C doesn't have anonymous functions, but check just in case. - if name := typ.TypeName(); name != "" { - return name, nil - } - - return "", fmt.Errorf("Func with type ID %d doesn't have a name", fi.TypeID) -} - -// Marshal writes the binary representation of the FuncInfo to w. -// The function offset is converted from bytes to instructions. -func (fi FuncInfo) Marshal(w io.Writer, offset uint64) error { - fi.InsnOff += uint32(offset) - // The kernel expects offsets in number of raw bpf instructions, - // while the ELF tracks it in bytes. 
- fi.InsnOff /= asm.InstructionSize - return binary.Write(w, internal.NativeEndian, fi) -} - -type FuncInfos []FuncInfo - -// funcForOffset returns the function that the instruction at the given -// ELF section offset belongs to. -// -// For example, consider an ELF section that contains 3 functions (a, b, c) -// at offsets 0, 10 and 15 respectively. Offset 5 will return function a, -// offset 12 will return b, offset >= 15 will return c, etc. -func (infos FuncInfos) funcForOffset(offset uint32) *FuncInfo { - for n, fi := range infos { - // Iterator went past the offset the caller is looking for, - // no point in continuing the search. - if offset < fi.InsnOff { - return nil - } - - // If there is no next item in the list, or if the given offset - // is smaller than the next function, the offset belongs to - // the current function. - if n+1 >= len(infos) || offset < infos[n+1].InsnOff { - return &fi - } - } - - return nil -} - -// parseLineInfos parses a func_info sub-section within .BTF.ext ito a map of -// func infos indexed by section name. -func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]FuncInfos, error) { - recordSize, err := parseExtInfoRecordSize(r, bo) - if err != nil { - return nil, err - } - - result := make(map[string]FuncInfos) - for { - secName, infoHeader, err := parseExtInfoSec(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - if err != nil { - return nil, err - } - - records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo) - if err != nil { - return nil, fmt.Errorf("section %v: %w", secName, err) - } - - result[secName] = records - } -} - -// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. -// These records appear after a btf_ext_info_sec header in the func_info -// sub-section of .BTF.ext. -func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) (FuncInfos, error) { - var out FuncInfos - var fi FuncInfo - - if exp, got := uint32(binary.Size(fi)), recordSize; exp != got { - // BTF blob's record size is longer than we know how to parse. - return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) - } - - for i := uint32(0); i < recordNum; i++ { - if err := binary.Read(r, bo, &fi); err != nil { - return nil, fmt.Errorf("can't read function info: %v", err) - } - - if fi.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) - } - - out = append(out, fi) - } - - return out, nil -} - -// LineInfo represents the location and contents of a single line of source -// code a BPF ELF was compiled from. -type LineInfo struct { - // Instruction offset of the function within an ELF section. - // After parsing a LineInfo from an ELF, this offset is relative to - // the function body instead of an ELF section. - InsnOff uint32 - FileNameOff uint32 - LineOff uint32 - LineCol uint32 -} - -// Marshal writes the binary representation of the LineInfo to w. -// The instruction offset is converted from bytes to instructions. -func (li LineInfo) Marshal(w io.Writer, offset uint64) error { - li.InsnOff += uint32(offset) - // The kernel expects offsets in number of raw bpf instructions, - // while the ELF tracks it in bytes. - li.InsnOff /= asm.InstructionSize - return binary.Write(w, internal.NativeEndian, li) -} - -type LineInfos []LineInfo - -// Marshal writes the binary representation of the LineInfos to w. 
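funcForOffset above relies on the records being sorted by instruction offset and does a linear scan. The scenario from its comment (functions a, b, c at offsets 0, 10 and 15) can be reproduced with a standalone sketch:

package main

import "fmt"

type funcInfo struct{ insnOff uint32 }

// funcForOffset returns the index of the function containing offset, or -1,
// mirroring the sorted linear scan above.
func funcForOffset(infos []funcInfo, offset uint32) int {
	for n, fi := range infos {
		if offset < fi.insnOff {
			return -1 // went past the offset, records are sorted
		}
		if n+1 >= len(infos) || offset < infos[n+1].insnOff {
			return n
		}
	}
	return -1
}

func main() {
	infos := []funcInfo{{0}, {10}, {15}} // functions a, b, c
	fmt.Println(funcForOffset(infos, 5))  // 0 (a)
	fmt.Println(funcForOffset(infos, 12)) // 1 (b)
	fmt.Println(funcForOffset(infos, 40)) // 2 (c)
}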
-func (li LineInfos) Marshal(w io.Writer, off uint64) error { - if len(li) == 0 { - return nil - } - - for _, info := range li { - if err := info.Marshal(w, off); err != nil { - return err - } - } - - return nil -} - -// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of -// line infos indexed by section name. -func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]LineInfos, error) { - recordSize, err := parseExtInfoRecordSize(r, bo) - if err != nil { - return nil, err - } - - result := make(map[string]LineInfos) - for { - secName, infoHeader, err := parseExtInfoSec(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - if err != nil { - return nil, err - } - - records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo) - if err != nil { - return nil, fmt.Errorf("section %v: %w", secName, err) - } - - result[secName] = records - } -} - -// parseLineInfoRecords parses a stream of line_infos into a lineInfos. -// These records appear after a btf_ext_info_sec header in the line_info -// sub-section of .BTF.ext. -func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) (LineInfos, error) { - var out LineInfos - var li LineInfo - - if exp, got := uint32(binary.Size(li)), recordSize; exp != got { - // BTF blob's record size is longer than we know how to parse. - return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) - } - - for i := uint32(0); i < recordNum; i++ { - if err := binary.Read(r, bo, &li); err != nil { - return nil, fmt.Errorf("can't read line info: %v", err) - } - - if li.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) - } - - out = append(out, li) - } - - return out, nil -} - -// bpfCoreRelo matches the kernel's struct bpf_core_relo. -type bpfCoreRelo struct { - InsnOff uint32 - TypeID TypeID - AccessStrOff uint32 - Kind COREKind -} - -type CoreRelo struct { - insnOff uint32 - typeID TypeID - accessor coreAccessor - kind COREKind -} - -type CoreRelos []CoreRelo - -var extInfoReloSize = binary.Size(bpfCoreRelo{}) - -// parseCoreRelos parses a core_relos sub-section within .BTF.ext ito a map of -// CO-RE relocations indexed by section name. -func parseCoreRelos(r io.Reader, bo binary.ByteOrder, strings stringTable) (map[string]CoreRelos, error) { - recordSize, err := parseExtInfoRecordSize(r, bo) - if err != nil { - return nil, err - } - - if recordSize != uint32(extInfoReloSize) { - return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) - } - - result := make(map[string]CoreRelos) - for { - secName, infoHeader, err := parseExtInfoSec(r, bo, strings) - if errors.Is(err, io.EOF) { - return result, nil - } - if err != nil { - return nil, err - } - - records, err := parseCoreReloRecords(r, bo, recordSize, infoHeader.NumInfo, strings) - if err != nil { - return nil, fmt.Errorf("section %v: %w", secName, err) - } - - result[secName] = records - } -} - -// parseCoreReloRecords parses a stream of CO-RE relocation entries into a -// coreRelos. These records appear after a btf_ext_info_sec header in the -// core_relos sub-section of .BTF.ext. 
-func parseCoreReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, strings stringTable) (CoreRelos, error) { - var out CoreRelos - - var relo bpfCoreRelo - for i := uint32(0); i < recordNum; i++ { - if err := binary.Read(r, bo, &relo); err != nil { - return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) - } - - if relo.InsnOff%asm.InstructionSize != 0 { - return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) - } - - accessorStr, err := strings.Lookup(relo.AccessStrOff) - if err != nil { - return nil, err - } - - accessor, err := parseCoreAccessor(accessorStr) - if err != nil { - return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) - } - - out = append(out, CoreRelo{ - relo.InsnOff, - relo.TypeID, - accessor, - relo.Kind, - }) - } - - return out, nil -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/info.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/info.go deleted file mode 100644 index dd44a0be6..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/info.go +++ /dev/null @@ -1,51 +0,0 @@ -package btf - -import ( - "bytes" - - "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/sys" - "github.com/cilium/ebpf/internal/unix" -) - -// info describes a BTF object. -type info struct { - BTF *Spec - ID ID - // Name is an identifying name for the BTF, currently only used by the - // kernel. - Name string - // KernelBTF is true if the BTf originated with the kernel and not - // userspace. - KernelBTF bool -} - -func newInfoFromFd(fd *sys.FD) (*info, error) { - // We invoke the syscall once with a empty BTF and name buffers to get size - // information to allocate buffers. Then we invoke it a second time with - // buffers to receive the data. 
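newInfoFromFd uses the common query-twice pattern: one call with empty buffers to learn the required sizes, then a second call with allocated buffers to fetch the data. A standalone sketch of that pattern; queryInfo is a hypothetical stand-in for an ObjInfo-style call, not an API of this package:

package main

import "fmt"

// queryInfo reports how large the buffer needs to be and fills in as much as
// fits. Hypothetical, for illustration only.
func queryInfo(buf []byte) (needed int) {
	payload := []byte("some variable-length object info")
	copy(buf, payload)
	return len(payload)
}

func main() {
	// First call with no buffer just to learn the required size...
	needed := queryInfo(nil)
	// ...then allocate and call again to receive the data.
	buf := make([]byte, needed)
	queryInfo(buf)
	fmt.Printf("%d bytes: %q\n", needed, buf)
}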
- var btfInfo sys.BtfInfo - if err := sys.ObjInfo(fd, &btfInfo); err != nil { - return nil, err - } - - btfBuffer := make([]byte, btfInfo.BtfSize) - nameBuffer := make([]byte, btfInfo.NameLen) - btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) - btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer) - if err := sys.ObjInfo(fd, &btfInfo); err != nil { - return nil, err - } - - spec, err := loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, nil, nil) - if err != nil { - return nil, err - } - - return &info{ - BTF: spec, - ID: ID(btfInfo.Id), - Name: unix.ByteSliceToString(nameBuffer), - KernelBTF: btfInfo.KernelBtf != 0, - }, nil -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/strings.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/strings.go deleted file mode 100644 index 9876aa227..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/strings.go +++ /dev/null @@ -1,54 +0,0 @@ -package btf - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type stringTable []byte - -func readStringTable(r io.Reader) (stringTable, error) { - contents, err := io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("can't read string table: %v", err) - } - - if len(contents) < 1 { - return nil, errors.New("string table is empty") - } - - if contents[0] != '\x00' { - return nil, errors.New("first item in string table is non-empty") - } - - if contents[len(contents)-1] != '\x00' { - return nil, errors.New("string table isn't null terminated") - } - - return stringTable(contents), nil -} - -func (st stringTable) Lookup(offset uint32) (string, error) { - if int64(offset) > int64(^uint(0)>>1) { - return "", fmt.Errorf("offset %d overflows int", offset) - } - - pos := int(offset) - if pos >= len(st) { - return "", fmt.Errorf("offset %d is out of bounds", offset) - } - - if pos > 0 && st[pos-1] != '\x00' { - return "", fmt.Errorf("offset %d isn't start of a string", offset) - } - - str := st[pos:] - end := bytes.IndexByte(str, '\x00') - if end == -1 { - return "", fmt.Errorf("offset %d isn't null terminated", offset) - } - - return string(str[:end]), nil -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/types.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/types.go deleted file mode 100644 index a6b5a10aa..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/btf/types.go +++ /dev/null @@ -1,992 +0,0 @@ -package btf - -import ( - "fmt" - "math" - "strings" -) - -const maxTypeDepth = 32 - -// TypeID identifies a type in a BTF section. -type TypeID uint32 - -// ID implements part of the Type interface. -func (tid TypeID) ID() TypeID { - return tid -} - -// Type represents a type described by BTF. -type Type interface { - // The type ID of the Type within this BTF spec. - ID() TypeID - - // Name of the type, empty for anonymous types and types that cannot - // carry a name, like Void and Pointer. - TypeName() string - - // Make a copy of the type, without copying Type members. - copy() Type - - // Enumerate all nested Types. Repeated calls must visit nested - // types in the same order. - walk(*typeDeque) - - String() string -} - -var ( - _ Type = (*Int)(nil) - _ Type = (*Struct)(nil) - _ Type = (*Union)(nil) - _ Type = (*Enum)(nil) - _ Type = (*Fwd)(nil) - _ Type = (*Func)(nil) - _ Type = (*Typedef)(nil) - _ Type = (*Var)(nil) - _ Type = (*Datasec)(nil) - _ Type = (*Float)(nil) -) - -// Void is the unit type of BTF. 
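The BTF string table above is a single blob of NUL-terminated strings: offset 0 is always the empty string, and a valid offset must point at the byte immediately after a NUL. A standalone sketch of the same lookup:

package main

import (
	"bytes"
	"fmt"
)

// lookup returns the NUL-terminated string starting at offset, mirroring
// stringTable.Lookup above, including the bounds and start-of-string checks.
func lookup(table []byte, offset int) (string, error) {
	if offset >= len(table) {
		return "", fmt.Errorf("offset %d is out of bounds", offset)
	}
	if offset > 0 && table[offset-1] != 0 {
		return "", fmt.Errorf("offset %d isn't the start of a string", offset)
	}
	end := bytes.IndexByte(table[offset:], 0)
	if end == -1 {
		return "", fmt.Errorf("offset %d isn't null terminated", offset)
	}
	return string(table[offset : offset+end]), nil
}

func main() {
	table := []byte("\x00int\x00char\x00")
	fmt.Println(lookup(table, 1)) // "int" <nil>
	fmt.Println(lookup(table, 5)) // "char" <nil>
	fmt.Println(lookup(table, 2)) // error: not the start of a string
}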
-type Void struct{} - -func (v *Void) ID() TypeID { return 0 } -func (v *Void) String() string { return "void#0" } -func (v *Void) TypeName() string { return "" } -func (v *Void) size() uint32 { return 0 } -func (v *Void) copy() Type { return (*Void)(nil) } -func (v *Void) walk(*typeDeque) {} - -type IntEncoding byte - -const ( - Signed IntEncoding = 1 << iota - Char - Bool -) - -func (ie IntEncoding) IsSigned() bool { - return ie&Signed != 0 -} - -func (ie IntEncoding) IsChar() bool { - return ie&Char != 0 -} - -func (ie IntEncoding) IsBool() bool { - return ie&Bool != 0 -} - -// Int is an integer of a given length. -type Int struct { - TypeID - - Name string - - // The size of the integer in bytes. - Size uint32 - Encoding IntEncoding - // OffsetBits is the starting bit offset. Currently always 0. - // See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int - OffsetBits uint32 - Bits byte -} - -func (i *Int) String() string { - var s strings.Builder - - switch { - case i.Encoding.IsChar(): - s.WriteString("char") - case i.Encoding.IsBool(): - s.WriteString("bool") - default: - if !i.Encoding.IsSigned() { - s.WriteRune('u') - } - s.WriteString("int") - fmt.Fprintf(&s, "%d", i.Size*8) - } - - fmt.Fprintf(&s, "#%d", i.TypeID) - - if i.Bits > 0 { - fmt.Fprintf(&s, "[bits=%d]", i.Bits) - } - - return s.String() -} - -func (i *Int) TypeName() string { return i.Name } -func (i *Int) size() uint32 { return i.Size } -func (i *Int) walk(*typeDeque) {} -func (i *Int) copy() Type { - cpy := *i - return &cpy -} - -func (i *Int) isBitfield() bool { - return i.OffsetBits > 0 -} - -// Pointer is a pointer to another type. -type Pointer struct { - TypeID - Target Type -} - -func (p *Pointer) String() string { - return fmt.Sprintf("pointer#%d[target=#%d]", p.TypeID, p.Target.ID()) -} - -func (p *Pointer) TypeName() string { return "" } -func (p *Pointer) size() uint32 { return 8 } -func (p *Pointer) walk(tdq *typeDeque) { tdq.push(&p.Target) } -func (p *Pointer) copy() Type { - cpy := *p - return &cpy -} - -// Array is an array with a fixed number of elements. -type Array struct { - TypeID - Type Type - Nelems uint32 -} - -func (arr *Array) String() string { - return fmt.Sprintf("array#%d[type=#%d n=%d]", arr.TypeID, arr.Type.ID(), arr.Nelems) -} - -func (arr *Array) TypeName() string { return "" } - -func (arr *Array) walk(tdq *typeDeque) { tdq.push(&arr.Type) } -func (arr *Array) copy() Type { - cpy := *arr - return &cpy -} - -// Struct is a compound type of consecutive members. -type Struct struct { - TypeID - Name string - // The size of the struct including padding, in bytes - Size uint32 - Members []Member -} - -func (s *Struct) String() string { - return fmt.Sprintf("struct#%d[%q]", s.TypeID, s.Name) -} - -func (s *Struct) TypeName() string { return s.Name } - -func (s *Struct) size() uint32 { return s.Size } - -func (s *Struct) walk(tdq *typeDeque) { - for i := range s.Members { - tdq.push(&s.Members[i].Type) - } -} - -func (s *Struct) copy() Type { - cpy := *s - cpy.Members = copyMembers(s.Members) - return &cpy -} - -func (s *Struct) members() []Member { - return s.Members -} - -// Union is a compound type where members occupy the same memory. -type Union struct { - TypeID - Name string - // The size of the union including padding, in bytes. 
- Size uint32 - Members []Member -} - -func (u *Union) String() string { - return fmt.Sprintf("union#%d[%q]", u.TypeID, u.Name) -} - -func (u *Union) TypeName() string { return u.Name } - -func (u *Union) size() uint32 { return u.Size } - -func (u *Union) walk(tdq *typeDeque) { - for i := range u.Members { - tdq.push(&u.Members[i].Type) - } -} - -func (u *Union) copy() Type { - cpy := *u - cpy.Members = copyMembers(u.Members) - return &cpy -} - -func (u *Union) members() []Member { - return u.Members -} - -func copyMembers(orig []Member) []Member { - cpy := make([]Member, len(orig)) - copy(cpy, orig) - return cpy -} - -type composite interface { - members() []Member -} - -var ( - _ composite = (*Struct)(nil) - _ composite = (*Union)(nil) -) - -// Member is part of a Struct or Union. -// -// It is not a valid Type. -type Member struct { - Name string - Type Type - // OffsetBits is the bit offset of this member. - OffsetBits uint32 - BitfieldSize uint32 -} - -// Enum lists possible values. -type Enum struct { - TypeID - Name string - Values []EnumValue -} - -func (e *Enum) String() string { - return fmt.Sprintf("enum#%d[%q]", e.TypeID, e.Name) -} - -func (e *Enum) TypeName() string { return e.Name } - -// EnumValue is part of an Enum -// -// Is is not a valid Type -type EnumValue struct { - Name string - Value int32 -} - -func (e *Enum) size() uint32 { return 4 } -func (e *Enum) walk(*typeDeque) {} -func (e *Enum) copy() Type { - cpy := *e - cpy.Values = make([]EnumValue, len(e.Values)) - copy(cpy.Values, e.Values) - return &cpy -} - -// FwdKind is the type of forward declaration. -type FwdKind int - -// Valid types of forward declaration. -const ( - FwdStruct FwdKind = iota - FwdUnion -) - -func (fk FwdKind) String() string { - switch fk { - case FwdStruct: - return "struct" - case FwdUnion: - return "union" - default: - return fmt.Sprintf("%T(%d)", fk, int(fk)) - } -} - -// Fwd is a forward declaration of a Type. -type Fwd struct { - TypeID - Name string - Kind FwdKind -} - -func (f *Fwd) String() string { - return fmt.Sprintf("fwd#%d[%s %q]", f.TypeID, f.Kind, f.Name) -} - -func (f *Fwd) TypeName() string { return f.Name } - -func (f *Fwd) walk(*typeDeque) {} -func (f *Fwd) copy() Type { - cpy := *f - return &cpy -} - -// Typedef is an alias of a Type. -type Typedef struct { - TypeID - Name string - Type Type -} - -func (td *Typedef) String() string { - return fmt.Sprintf("typedef#%d[%q #%d]", td.TypeID, td.Name, td.Type.ID()) -} - -func (td *Typedef) TypeName() string { return td.Name } - -func (td *Typedef) walk(tdq *typeDeque) { tdq.push(&td.Type) } -func (td *Typedef) copy() Type { - cpy := *td - return &cpy -} - -// Volatile is a qualifier. -type Volatile struct { - TypeID - Type Type -} - -func (v *Volatile) String() string { - return fmt.Sprintf("volatile#%d[#%d]", v.TypeID, v.Type.ID()) -} - -func (v *Volatile) TypeName() string { return "" } - -func (v *Volatile) qualify() Type { return v.Type } -func (v *Volatile) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Volatile) copy() Type { - cpy := *v - return &cpy -} - -// Const is a qualifier. -type Const struct { - TypeID - Type Type -} - -func (c *Const) String() string { - return fmt.Sprintf("const#%d[#%d]", c.TypeID, c.Type.ID()) -} - -func (c *Const) TypeName() string { return "" } - -func (c *Const) qualify() Type { return c.Type } -func (c *Const) walk(tdq *typeDeque) { tdq.push(&c.Type) } -func (c *Const) copy() Type { - cpy := *c - return &cpy -} - -// Restrict is a qualifier. 
-type Restrict struct { - TypeID - Type Type -} - -func (r *Restrict) String() string { - return fmt.Sprintf("restrict#%d[#%d]", r.TypeID, r.Type.ID()) -} - -func (r *Restrict) TypeName() string { return "" } - -func (r *Restrict) qualify() Type { return r.Type } -func (r *Restrict) walk(tdq *typeDeque) { tdq.push(&r.Type) } -func (r *Restrict) copy() Type { - cpy := *r - return &cpy -} - -// Func is a function definition. -type Func struct { - TypeID - Name string - Type Type - Linkage FuncLinkage -} - -func (f *Func) String() string { - return fmt.Sprintf("func#%d[%s %q proto=#%d]", f.TypeID, f.Linkage, f.Name, f.Type.ID()) -} - -func (f *Func) TypeName() string { return f.Name } - -func (f *Func) walk(tdq *typeDeque) { tdq.push(&f.Type) } -func (f *Func) copy() Type { - cpy := *f - return &cpy -} - -// FuncProto is a function declaration. -type FuncProto struct { - TypeID - Return Type - Params []FuncParam -} - -func (fp *FuncProto) String() string { - var s strings.Builder - fmt.Fprintf(&s, "proto#%d[", fp.TypeID) - for _, param := range fp.Params { - fmt.Fprintf(&s, "%q=#%d, ", param.Name, param.Type.ID()) - } - fmt.Fprintf(&s, "return=#%d]", fp.Return.ID()) - return s.String() -} - -func (fp *FuncProto) TypeName() string { return "" } - -func (fp *FuncProto) walk(tdq *typeDeque) { - tdq.push(&fp.Return) - for i := range fp.Params { - tdq.push(&fp.Params[i].Type) - } -} - -func (fp *FuncProto) copy() Type { - cpy := *fp - cpy.Params = make([]FuncParam, len(fp.Params)) - copy(cpy.Params, fp.Params) - return &cpy -} - -type FuncParam struct { - Name string - Type Type -} - -// Var is a global variable. -type Var struct { - TypeID - Name string - Type Type - Linkage VarLinkage -} - -func (v *Var) String() string { - return fmt.Sprintf("var#%d[%s %q]", v.TypeID, v.Linkage, v.Name) -} - -func (v *Var) TypeName() string { return v.Name } - -func (v *Var) walk(tdq *typeDeque) { tdq.push(&v.Type) } -func (v *Var) copy() Type { - cpy := *v - return &cpy -} - -// Datasec is a global program section containing data. -type Datasec struct { - TypeID - Name string - Size uint32 - Vars []VarSecinfo -} - -func (ds *Datasec) String() string { - return fmt.Sprintf("section#%d[%q]", ds.TypeID, ds.Name) -} - -func (ds *Datasec) TypeName() string { return ds.Name } - -func (ds *Datasec) size() uint32 { return ds.Size } - -func (ds *Datasec) walk(tdq *typeDeque) { - for i := range ds.Vars { - tdq.push(&ds.Vars[i].Type) - } -} - -func (ds *Datasec) copy() Type { - cpy := *ds - cpy.Vars = make([]VarSecinfo, len(ds.Vars)) - copy(cpy.Vars, ds.Vars) - return &cpy -} - -// VarSecinfo describes variable in a Datasec. -// -// It is not a valid Type. -type VarSecinfo struct { - Type Type - Offset uint32 - Size uint32 -} - -// Float is a float of a given length. -type Float struct { - TypeID - Name string - - // The size of the float in bytes. 
- Size uint32 -} - -func (f *Float) String() string { - return fmt.Sprintf("float%d#%d[%q]", f.Size*8, f.TypeID, f.Name) -} - -func (f *Float) TypeName() string { return f.Name } -func (f *Float) size() uint32 { return f.Size } -func (f *Float) walk(*typeDeque) {} -func (f *Float) copy() Type { - cpy := *f - return &cpy -} - -type sizer interface { - size() uint32 -} - -var ( - _ sizer = (*Int)(nil) - _ sizer = (*Pointer)(nil) - _ sizer = (*Struct)(nil) - _ sizer = (*Union)(nil) - _ sizer = (*Enum)(nil) - _ sizer = (*Datasec)(nil) -) - -type qualifier interface { - qualify() Type -} - -var ( - _ qualifier = (*Const)(nil) - _ qualifier = (*Restrict)(nil) - _ qualifier = (*Volatile)(nil) -) - -// Sizeof returns the size of a type in bytes. -// -// Returns an error if the size can't be computed. -func Sizeof(typ Type) (int, error) { - var ( - n = int64(1) - elem int64 - ) - - for i := 0; i < maxTypeDepth; i++ { - switch v := typ.(type) { - case *Array: - if n > 0 && int64(v.Nelems) > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - // Arrays may be of zero length, which allows - // n to be zero as well. - n *= int64(v.Nelems) - typ = v.Type - continue - - case sizer: - elem = int64(v.size()) - - case *Typedef: - typ = v.Type - continue - - case qualifier: - typ = v.qualify() - continue - - default: - return 0, fmt.Errorf("unsized type %T", typ) - } - - if n > 0 && elem > math.MaxInt64/n { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - size := n * elem - if int64(int(size)) != size { - return 0, fmt.Errorf("type %s: overflow", typ) - } - - return int(size), nil - } - - return 0, fmt.Errorf("type %s: exceeded type depth", typ) -} - -// Copy a Type recursively. -func Copy(typ Type) Type { - typ, _ = copyType(typ, nil) - return typ -} - -// copy a Type recursively. -// -// typ may form a cycle. -// -// Returns any errors from transform verbatim. -func copyType(typ Type, transform func(Type) (Type, error)) (Type, error) { - copies := make(copier) - return typ, copies.copy(&typ, transform) -} - -// copy a slice of Types recursively. -// -// Types may form a cycle. -// -// Returns any errors from transform verbatim. -func copyTypes(types []Type, transform func(Type) (Type, error)) ([]Type, error) { - result := make([]Type, len(types)) - copy(result, types) - - copies := make(copier) - for i := range result { - if err := copies.copy(&result[i], transform); err != nil { - return nil, err - } - } - - return result, nil -} - -type copier map[Type]Type - -func (c copier) copy(typ *Type, transform func(Type) (Type, error)) error { - var work typeDeque - for t := typ; t != nil; t = work.pop() { - // *t is the identity of the type. - if cpy := c[*t]; cpy != nil { - *t = cpy - continue - } - - var cpy Type - if transform != nil { - tf, err := transform(*t) - if err != nil { - return fmt.Errorf("copy %s: %w", *t, err) - } - cpy = tf.copy() - } else { - cpy = (*t).copy() - } - - c[*t] = cpy - *t = cpy - - // Mark any nested types for copying. - cpy.walk(&work) - } - - return nil -} - -// typeDeque keeps track of pointers to types which still -// need to be visited. -type typeDeque struct { - types []*Type - read, write uint64 - mask uint64 -} - -func (dq *typeDeque) empty() bool { - return dq.read == dq.write -} - -// push adds a type to the stack. 
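Sizeof above unwraps typedefs and qualifiers and multiplies up array dimensions until it reaches a type with an intrinsic size. A worked example with a simplified type model (not the package's types, and without the overflow checks the real loop carries):

package main

import "fmt"

type intType struct{ size int } // something with an intrinsic size
type arrayType struct {
	elem   any
	nelems int
}
type typedefType struct{ target any } // transparent for sizing

// sizeof mirrors the loop in Sizeof above, minus the overflow checks.
func sizeof(t any) (int, error) {
	n := 1
	for depth := 0; depth < 32; depth++ {
		switch v := t.(type) {
		case arrayType:
			n *= v.nelems
			t = v.elem
		case typedefType:
			t = v.target
		case intType:
			return n * v.size, nil
		default:
			return 0, fmt.Errorf("unsized type %T", t)
		}
	}
	return 0, fmt.Errorf("exceeded type depth")
}

func main() {
	// A typedef of a [4][10] array of 4-byte ints: 4 * 10 * 4 = 160 bytes.
	t := typedefType{arrayType{arrayType{intType{4}, 10}, 4}}
	fmt.Println(sizeof(t)) // 160 <nil>
}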
-func (dq *typeDeque) push(t *Type) { - if dq.write-dq.read < uint64(len(dq.types)) { - dq.types[dq.write&dq.mask] = t - dq.write++ - return - } - - new := len(dq.types) * 2 - if new == 0 { - new = 8 - } - - types := make([]*Type, new) - pivot := dq.read & dq.mask - n := copy(types, dq.types[pivot:]) - n += copy(types[n:], dq.types[:pivot]) - types[n] = t - - dq.types = types - dq.mask = uint64(new) - 1 - dq.read, dq.write = 0, uint64(n+1) -} - -// shift returns the first element or null. -func (dq *typeDeque) shift() *Type { - if dq.empty() { - return nil - } - - index := dq.read & dq.mask - t := dq.types[index] - dq.types[index] = nil - dq.read++ - return t -} - -// pop returns the last element or null. -func (dq *typeDeque) pop() *Type { - if dq.empty() { - return nil - } - - dq.write-- - index := dq.write & dq.mask - t := dq.types[index] - dq.types[index] = nil - return t -} - -// all returns all elements. -// -// The deque is empty after calling this method. -func (dq *typeDeque) all() []*Type { - length := dq.write - dq.read - types := make([]*Type, 0, length) - for t := dq.shift(); t != nil; t = dq.shift() { - types = append(types, t) - } - return types -} - -// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns -// it into a graph of Types connected via pointers. -// -// Returns a map of named types (so, where NameOff is non-zero) and a slice of types -// indexed by TypeID. Since BTF ignores compilation units, multiple types may share -// the same name. A Type may form a cyclic graph by pointing at itself. -func inflateRawTypes(rawTypes []rawType, rawStrings stringTable) (types []Type, namedTypes map[essentialName][]Type, err error) { - type fixupDef struct { - id TypeID - expectedKind btfKind - typ *Type - } - - var fixups []fixupDef - fixup := func(id TypeID, expectedKind btfKind, typ *Type) { - fixups = append(fixups, fixupDef{id, expectedKind, typ}) - } - - convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { - // NB: The fixup below relies on pre-allocating this array to - // work, since otherwise append might re-allocate members. - members := make([]Member, 0, len(raw)) - for i, btfMember := range raw { - name, err := rawStrings.Lookup(btfMember.NameOff) - if err != nil { - return nil, fmt.Errorf("can't get name for member %d: %w", i, err) - } - m := Member{ - Name: name, - OffsetBits: btfMember.Offset, - } - if kindFlag { - m.BitfieldSize = btfMember.Offset >> 24 - m.OffsetBits &= 0xffffff - } - members = append(members, m) - } - for i := range members { - fixup(raw[i].Type, kindUnknown, &members[i].Type) - } - return members, nil - } - - types = make([]Type, 0, len(rawTypes)) - types = append(types, (*Void)(nil)) - namedTypes = make(map[essentialName][]Type) - - for i, raw := range rawTypes { - var ( - // Void is defined to always be type ID 0, and is thus - // omitted from BTF. - id = TypeID(i + 1) - typ Type - ) - - name, err := rawStrings.Lookup(raw.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for type id %d: %w", id, err) - } - - switch raw.Kind() { - case kindInt: - encoding, offset, bits := intEncoding(*raw.data.(*uint32)) - typ = &Int{id, name, raw.Size(), encoding, offset, bits} - - case kindPointer: - ptr := &Pointer{id, nil} - fixup(raw.Type(), kindUnknown, &ptr.Target) - typ = ptr - - case kindArray: - btfArr := raw.data.(*btfArray) - - // IndexType is unused according to btf.rst. - // Don't make it available right now. 
- arr := &Array{id, nil, btfArr.Nelems} - fixup(btfArr.Type, kindUnknown, &arr.Type) - typ = arr - - case kindStruct: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) - } - typ = &Struct{id, name, raw.Size(), members} - - case kindUnion: - members, err := convertMembers(raw.data.([]btfMember), raw.KindFlag()) - if err != nil { - return nil, nil, fmt.Errorf("union %s (id %d): %w", name, id, err) - } - typ = &Union{id, name, raw.Size(), members} - - case kindEnum: - rawvals := raw.data.([]btfEnum) - vals := make([]EnumValue, 0, len(rawvals)) - for i, btfVal := range rawvals { - name, err := rawStrings.Lookup(btfVal.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for enum value %d: %s", i, err) - } - vals = append(vals, EnumValue{ - Name: name, - Value: btfVal.Val, - }) - } - typ = &Enum{id, name, vals} - - case kindForward: - if raw.KindFlag() { - typ = &Fwd{id, name, FwdUnion} - } else { - typ = &Fwd{id, name, FwdStruct} - } - - case kindTypedef: - typedef := &Typedef{id, name, nil} - fixup(raw.Type(), kindUnknown, &typedef.Type) - typ = typedef - - case kindVolatile: - volatile := &Volatile{id, nil} - fixup(raw.Type(), kindUnknown, &volatile.Type) - typ = volatile - - case kindConst: - cnst := &Const{id, nil} - fixup(raw.Type(), kindUnknown, &cnst.Type) - typ = cnst - - case kindRestrict: - restrict := &Restrict{id, nil} - fixup(raw.Type(), kindUnknown, &restrict.Type) - typ = restrict - - case kindFunc: - fn := &Func{id, name, nil, raw.Linkage()} - fixup(raw.Type(), kindFuncProto, &fn.Type) - typ = fn - - case kindFuncProto: - rawparams := raw.data.([]btfParam) - params := make([]FuncParam, 0, len(rawparams)) - for i, param := range rawparams { - name, err := rawStrings.Lookup(param.NameOff) - if err != nil { - return nil, nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) - } - params = append(params, FuncParam{ - Name: name, - }) - } - for i := range params { - fixup(rawparams[i].Type, kindUnknown, ¶ms[i].Type) - } - - fp := &FuncProto{id, nil, params} - fixup(raw.Type(), kindUnknown, &fp.Return) - typ = fp - - case kindVar: - variable := raw.data.(*btfVariable) - v := &Var{id, name, nil, VarLinkage(variable.Linkage)} - fixup(raw.Type(), kindUnknown, &v.Type) - typ = v - - case kindDatasec: - btfVars := raw.data.([]btfVarSecinfo) - vars := make([]VarSecinfo, 0, len(btfVars)) - for _, btfVar := range btfVars { - vars = append(vars, VarSecinfo{ - Offset: btfVar.Offset, - Size: btfVar.Size, - }) - } - for i := range vars { - fixup(btfVars[i].Type, kindVar, &vars[i].Type) - } - typ = &Datasec{id, name, raw.SizeType, vars} - - case kindFloat: - typ = &Float{id, name, raw.Size()} - - default: - return nil, nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind()) - } - - types = append(types, typ) - - if name := newEssentialName(typ.TypeName()); name != "" { - namedTypes[name] = append(namedTypes[name], typ) - } - } - - for _, fixup := range fixups { - i := int(fixup.id) - if i >= len(types) { - return nil, nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) - } - - // Default void (id 0) to unknown - rawKind := kindUnknown - if i > 0 { - rawKind = rawTypes[i-1].Kind() - } - - if expected := fixup.expectedKind; expected != kindUnknown && rawKind != expected { - return nil, nil, fmt.Errorf("expected type id %d to have kind %s, found %s", fixup.id, expected, rawKind) - } - - *fixup.typ = types[i] - } - - return types, namedTypes, nil 
-} - -// essentialName represents the name of a BTF type stripped of any flavor -// suffixes after a ___ delimiter. -type essentialName string - -// newEssentialName returns name without a ___ suffix. -// -// CO-RE has the concept of 'struct flavors', which are used to deal with -// changes in kernel data structures. Anything after three underscores -// in a type name is ignored for the purpose of finding a candidate type -// in the kernel's BTF. -func newEssentialName(name string) essentialName { - lastIdx := strings.LastIndex(name, "___") - if lastIdx > 0 { - return essentialName(name[:lastIdx]) - } - return essentialName(name) -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/deque.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/deque.go new file mode 100644 index 000000000..ed113ddd7 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/deque.go @@ -0,0 +1,88 @@ +package internal + +import "math/bits" + +// Deque implements a double ended queue. +type Deque[T any] struct { + elems []T + read, write uint64 + mask uint64 +} + +// Reset clears the contents of the deque while retaining the backing buffer. +func (dq *Deque[T]) Reset() { + var zero T + + for i := dq.read; i < dq.write; i++ { + dq.elems[i&dq.mask] = zero + } + + dq.read, dq.write = 0, 0 +} + +func (dq *Deque[T]) Empty() bool { + return dq.read == dq.write +} + +// Push adds an element to the end. +func (dq *Deque[T]) Push(e T) { + dq.Grow(1) + dq.elems[dq.write&dq.mask] = e + dq.write++ +} + +// Shift returns the first element or the zero value. +func (dq *Deque[T]) Shift() T { + var zero T + + if dq.Empty() { + return zero + } + + index := dq.read & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + dq.read++ + return t +} + +// Pop returns the last element or the zero value. +func (dq *Deque[T]) Pop() T { + var zero T + + if dq.Empty() { + return zero + } + + dq.write-- + index := dq.write & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + return t +} + +// Grow the deque's capacity, if necessary, to guarantee space for another n +// elements. +func (dq *Deque[T]) Grow(n int) { + have := dq.write - dq.read + need := have + uint64(n) + if need < have { + panic("overflow") + } + if uint64(len(dq.elems)) >= need { + return + } + + // Round up to the new power of two which is at least 8. + // See https://jameshfisher.com/2018/03/30/round-up-power-2/ + capacity := max(1<<(64-bits.LeadingZeros64(need-1)), 8) + + elems := make([]T, have, capacity) + pivot := dq.read & dq.mask + copied := copy(elems, dq.elems[pivot:]) + copy(elems[copied:], dq.elems[:pivot]) + + dq.elems = elems[:capacity] + dq.mask = uint64(capacity) - 1 + dq.read, dq.write = 0, have +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/enums.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/enums.go new file mode 100644 index 000000000..71320b631 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/enums.go @@ -0,0 +1,65 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +Converts an attach type enum into a GUID. 
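The old typeDeque and the new generic Deque added in internal/deque.go use the same ring-buffer scheme: read and write are monotonically increasing counters, and the backing slice is kept at a power-of-two length so counter & mask picks a slot without any explicit wrap-around handling. A standalone sketch of that index arithmetic (the real code grows the buffer instead of dropping elements when full):

package main

import "fmt"

func main() {
	const capacity = 8 // always a power of two
	const mask = capacity - 1

	var (
		elems       [capacity]int
		read, write uint64
	)

	push := func(v int) { elems[write&mask] = v; write++ }
	shift := func() int { v := elems[read&mask]; read++; return v }

	// Push enough elements that the counters run past the slice length;
	// the mask keeps mapping them onto valid slots.
	for i := 1; i <= 12; i++ {
		if write-read == capacity { // full: drop the oldest for this sketch
			shift()
		}
		push(i)
	}

	fmt.Println(write - read)     // 8 elements buffered
	fmt.Println(shift(), shift()) // 5 6 (oldest two still held)
}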
+ + ebpf_result_t ebpf_get_ebpf_attach_type( + bpf_attach_type_t bpf_attach_type, + _Out_ ebpf_attach_type_t* ebpf_attach_type_t *ebpf_attach_type) +*/ +var ebpfGetEbpfAttachTypeProc = newProc("ebpf_get_ebpf_attach_type") + +func EbpfGetEbpfAttachType(attachType uint32) (windows.GUID, error) { + addr, err := ebpfGetEbpfAttachTypeProc.Find() + if err != nil { + return windows.GUID{}, err + } + + var attachTypeGUID windows.GUID + err = errorResult(syscall.SyscallN(addr, + uintptr(attachType), + uintptr(unsafe.Pointer(&attachTypeGUID)), + )) + return attachTypeGUID, err +} + +/* +Retrieve a program type given a GUID. + + bpf_prog_type_t ebpf_get_bpf_program_type(_In_ const ebpf_program_type_t* program_type) +*/ +var ebpfGetBpfProgramTypeProc = newProc("ebpf_get_bpf_program_type") + +func EbpfGetBpfProgramType(programType windows.GUID) (uint32, error) { + addr, err := ebpfGetBpfProgramTypeProc.Find() + if err != nil { + return 0, err + } + + return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&programType)))), nil +} + +/* +Retrieve an attach type given a GUID. + + bpf_attach_type_t ebpf_get_bpf_attach_type(_In_ const ebpf_attach_type_t* ebpf_attach_type) +*/ +var ebpfGetBpfAttachTypeProc = newProc("ebpf_get_bpf_attach_type") + +func EbpfGetBpfAttachType(attachType windows.GUID) (uint32, error) { + addr, err := ebpfGetBpfAttachTypeProc.Find() + if err != nil { + return 0, err + } + + return uint32Result(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&attachType)))), nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go new file mode 100644 index 000000000..83b9a265e --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/error_reporting.go @@ -0,0 +1,155 @@ +//go:build windows + +package efw + +import ( + "errors" + "fmt" + "os" + "syscall" + "testing" + + "golang.org/x/sys/windows" +) + +func init() { + if !testing.Testing() { + return + } + + if isDebuggerPresent() { + return + } + + if err := configureCRTErrorReporting(); err != nil { + fmt.Fprintln(os.Stderr, "WARNING: Could not configure CRT error reporting, tests may hang:", err) + } +} + +var errErrorReportingAlreadyConfigured = errors.New("error reporting already configured") + +// Configure built-in error reporting of the C runtime library. +// +// The C runtime emits assertion failures into a graphical message box by default. +// This causes a hang in CI environments. This function configures the CRT to +// log to stderr instead. +func configureCRTErrorReporting() error { + const ucrtDebug = "ucrtbased.dll" + + // Constants from crtdbg.h + // + // See https://doxygen.reactos.org/da/d40/crt_2crtdbg_8h_source.html + const ( + _CRT_ERROR = 1 + _CRT_ASSERT = 2 + _CRTDBG_MODE_FILE = 0x1 + _CRTDBG_MODE_WNDW = 0x4 + _CRTDBG_HFILE_ERROR = -2 + _CRTDBG_FILE_STDERR = -4 + ) + + // Load the efW API to trigger loading the CRT. This may fail, in which case + // we can't figure out which CRT is being used. + // In that case we rely on the error bubbling up via some other path. + _ = module.Load() + + ucrtHandle, err := syscall.UTF16PtrFromString(ucrtDebug) + if err != nil { + return err + } + + var handle windows.Handle + err = windows.GetModuleHandleEx(0, ucrtHandle, &handle) + if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) { + // Loading the ebpf api did not pull in the debug UCRT, so there is + // nothing to configure. 
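The efw wrappers above all follow the same shape: resolve an export from ebpfapi.dll on first use, call it with syscall.SyscallN, and translate the returned ebpf_result_t (zero meaning success). The newProc and errorResult helpers are internal to the package; a rough standalone sketch of the same pattern with plain golang.org/x/sys/windows, using ebpf_close_fd as the example export:

//go:build windows

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	ebpfapi     = windows.NewLazyDLL("ebpfapi.dll")
	ebpfCloseFd = ebpfapi.NewProc("ebpf_close_fd")
)

// closeFd calls ebpf_result_t ebpf_close_fd(fd_t fd); a zero result means success.
func closeFd(fd int) error {
	if err := ebpfCloseFd.Find(); err != nil {
		return err // DLL or export not present
	}
	r1, _, _ := syscall.SyscallN(ebpfCloseFd.Addr(), uintptr(fd))
	if r1 != 0 {
		return fmt.Errorf("ebpf_close_fd: result %d", r1)
	}
	return nil
}

func main() {
	fmt.Println(closeFd(-1)) // expect an error unless -1 is somehow a valid fd
}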
+ return nil + } else if err != nil { + return err + } + defer windows.FreeLibrary(handle) + + setReportModeAddr, err := windows.GetProcAddress(handle, "_CrtSetReportMode") + if err != nil { + return err + } + + setReportMode := func(reportType int, reportMode int) (int, error) { + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170 + r1, _, err := syscall.SyscallN(setReportModeAddr, uintptr(reportType), uintptr(reportMode)) + if int(r1) == -1 { + return 0, fmt.Errorf("set report mode for type %d: %w", reportType, err) + } + return int(r1), nil + } + + setReportFileAddr, err := windows.GetProcAddress(handle, "_CrtSetReportFile") + if err != nil { + return err + } + + setReportFile := func(reportType int, reportFile int) (int, error) { + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportfile?view=msvc-170 + r1, _, err := syscall.SyscallN(setReportFileAddr, uintptr(reportType), uintptr(reportFile)) + if int(r1) == _CRTDBG_HFILE_ERROR { + return 0, fmt.Errorf("set report file for type %d: %w", reportType, err) + } + return int(r1), nil + } + + reportToFile := func(reportType, defaultMode int) error { + oldMode, err := setReportMode(reportType, _CRTDBG_MODE_FILE) + if err != nil { + return err + } + + if oldMode != defaultMode { + // Attempt to restore old mode if it was different from the expected default. + _, _ = setReportMode(reportType, oldMode) + return errErrorReportingAlreadyConfigured + } + + oldFile, err := setReportFile(reportType, _CRTDBG_FILE_STDERR) + if err != nil { + return err + } + + if oldFile != -1 { + // Attempt to restore old file if it was different from the expected default. + _, _ = setReportFile(reportType, oldFile) + return errErrorReportingAlreadyConfigured + } + + return nil + } + + // See https://learn.microsoft.com/en-us/cpp/c-runtime-library/reference/crtsetreportmode?view=msvc-170#remarks + // for defaults. + if err := reportToFile(_CRT_ASSERT, _CRTDBG_MODE_WNDW); err != nil { + return err + } + + if err := reportToFile(_CRT_ERROR, _CRTDBG_MODE_WNDW); err != nil { + return err + } + + return nil +} + +// isDebuggerPresent returns true if the current process is being debugged. 
+// +// See https://learn.microsoft.com/en-us/windows/win32/api/debugapi/nf-debugapi-isdebuggerpresent +func isDebuggerPresent() bool { + kernel32Handle, err := windows.LoadLibrary("kernel32.dll") + if err != nil { + return false + } + + isDebuggerPresentAddr, err := windows.GetProcAddress(kernel32Handle, "IsDebuggerPresent") + if err != nil { + return false + } + + r1, _, _ := syscall.SyscallN(isDebuggerPresentAddr) + return r1 != 0 +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/fd.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/fd.go new file mode 100644 index 000000000..b0d0bcdd4 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/fd.go @@ -0,0 +1,34 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" +) + +// ebpf_result_t ebpf_close_fd(fd_t fd) +var ebpfCloseFdProc = newProc("ebpf_close_fd") + +func EbpfCloseFd(fd int) error { + addr, err := ebpfCloseFdProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, uintptr(fd))) +} + +// ebpf_result_t ebpf_duplicate_fd(fd_t fd, _Out_ fd_t* dup) +var ebpfDuplicateFdProc = newProc("ebpf_duplicate_fd") + +func EbpfDuplicateFd(fd int) (int, error) { + addr, err := ebpfDuplicateFdProc.Find() + if err != nil { + return -1, err + } + + var dup FD + err = errorResult(syscall.SyscallN(addr, uintptr(fd), uintptr(unsafe.Pointer(&dup)))) + return int(dup), err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/map.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/map.go new file mode 100644 index 000000000..82f510fef --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/map.go @@ -0,0 +1,109 @@ +//go:build windows + +package efw + +import ( + "runtime" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +ebpf_ring_buffer_map_map_buffer( + + fd_t map_fd, + _Outptr_result_maybenull_ void** consumer, + _Outptr_result_maybenull_ const void** producer, + _Outptr_result_buffer_maybenull_(*data_size) const uint8_t** data, + _Out_ size_t* data_size) EBPF_NO_EXCEPT; +*/ +var ebpfRingBufferMapMapBufferProc = newProc("ebpf_ring_buffer_map_map_buffer") + +func EbpfRingBufferMapMapBuffer(mapFd int) (consumer, producer, data *uint8, dataLen Size, _ error) { + addr, err := ebpfRingBufferMapMapBufferProc.Find() + if err != nil { + return nil, nil, nil, 0, err + } + + err = errorResult(syscall.SyscallN(addr, + uintptr(mapFd), + uintptr(unsafe.Pointer(&consumer)), + uintptr(unsafe.Pointer(&producer)), + uintptr(unsafe.Pointer(&data)), + uintptr(unsafe.Pointer(&dataLen)), + )) + if err != nil { + return nil, nil, nil, 0, err + } + + return consumer, producer, data, dataLen, nil +} + +/* +ebpf_ring_buffer_map_unmap_buffer( + + fd_t map_fd, _In_ void* consumer, _In_ const void* producer, _In_ const void* data) EBPF_NO_EXCEPT; +*/ +var ebpfRingBufferMapUnmapBufferProc = newProc("ebpf_ring_buffer_map_unmap_buffer") + +func EbpfRingBufferMapUnmapBuffer(mapFd int, consumer, producer, data *uint8) error { + addr, err := ebpfRingBufferMapUnmapBufferProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, + uintptr(mapFd), + uintptr(unsafe.Pointer(consumer)), + uintptr(unsafe.Pointer(producer)), + uintptr(unsafe.Pointer(data)), + )) +} + +/* +ebpf_result_t ebpf_map_set_wait_handle( + + fd_t map_fd, + uint64_t index, + ebpf_handle_t handle) +*/ +var ebpfMapSetWaitHandleProc = newProc("ebpf_map_set_wait_handle") + +func EbpfMapSetWaitHandle(mapFd int, index uint64, handle 
windows.Handle) error { + addr, err := ebpfMapSetWaitHandleProc.Find() + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, + uintptr(mapFd), + uintptr(index), + uintptr(handle), + )) +} + +/* +ebpf_result_t ebpf_ring_buffer_map_write( + + fd_t ring_buffer_map_fd, + const void* data, + size_t data_length) +*/ +var ebpfRingBufferMapWriteProc = newProc("ebpf_ring_buffer_map_write") + +func EbpfRingBufferMapWrite(ringBufferMapFd int, data []byte) error { + addr, err := ebpfRingBufferMapWriteProc.Find() + if err != nil { + return err + } + + err = errorResult(syscall.SyscallN(addr, + uintptr(ringBufferMapFd), + uintptr(unsafe.Pointer(&data[0])), + uintptr(len(data)), + )) + runtime.KeepAlive(data) + return err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/module.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/module.go new file mode 100644 index 000000000..606d83930 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/module.go @@ -0,0 +1,36 @@ +//go:build windows + +// Package efw contains support code for eBPF for Windows. +package efw + +import ( + "golang.org/x/sys/windows" +) + +// module is the global handle for the eBPF for Windows user-space API. +var module = windows.NewLazyDLL("ebpfapi.dll") + +// FD is the equivalent of fd_t. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L24 +type FD int32 + +// Size is the equivalent of size_t. +// +// This is correct on amd64 and arm64 according to tests on godbolt.org. +type Size uint64 + +// Int is the equivalent of int on MSVC (am64, arm64) and MinGW (gcc, clang). +type Int int32 + +// ObjectType is the equivalent of ebpf_object_type_t. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/44f5de09ec0f3f7ad176c00a290c1cb7106cdd5e/include/ebpf_core_structs.h#L41 +type ObjectType uint32 + +const ( + EBPF_OBJECT_UNKNOWN ObjectType = iota + EBPF_OBJECT_MAP + EBPF_OBJECT_LINK + EBPF_OBJECT_PROGRAM +) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/native.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/native.go new file mode 100644 index 000000000..04f796abb --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/native.go @@ -0,0 +1,44 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +ebpf_result_t ebpf_object_load_native_by_fds( + + _In_z_ const char* file_name, + _Inout_ size_t* count_of_maps, + _Out_writes_opt_(count_of_maps) fd_t* map_fds, + _Inout_ size_t* count_of_programs, + _Out_writes_opt_(count_of_programs) fd_t* program_fds) +*/ +var ebpfObjectLoadNativeByFdsProc = newProc("ebpf_object_load_native_by_fds") + +func EbpfObjectLoadNativeFds(fileName string, mapFds []FD, programFds []FD) (int, int, error) { + addr, err := ebpfObjectLoadNativeByFdsProc.Find() + if err != nil { + return 0, 0, err + } + + fileBytes, err := windows.ByteSliceFromString(fileName) + if err != nil { + return 0, 0, err + } + + countOfMaps := Size(len(mapFds)) + countOfPrograms := Size(len(programFds)) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(&fileBytes[0])), + uintptr(unsafe.Pointer(&countOfMaps)), + uintptr(unsafe.Pointer(&mapFds[0])), + uintptr(unsafe.Pointer(&countOfPrograms)), + uintptr(unsafe.Pointer(&programFds[0])), + )) + return int(countOfMaps), int(countOfPrograms), err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/object.go 
b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/object.go new file mode 100644 index 000000000..560e2f09b --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/object.go @@ -0,0 +1,117 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +// https://github.com/microsoft/ebpf-for-windows/blob/9d9003c39c3fd75be5225ac0fce30077d6bf0604/include/ebpf_core_structs.h#L15 +const _EBPF_MAX_PIN_PATH_LENGTH = 256 + +/* +Retrieve object info and type from a fd. + + ebpf_result_t ebpf_object_get_info_by_fd( + fd_t bpf_fd, + _Inout_updates_bytes_to_opt_(*info_size, *info_size) void* info, + _Inout_opt_ uint32_t* info_size, + _Out_opt_ ebpf_object_type_t* type) +*/ +var ebpfObjectGetInfoByFdProc = newProc("ebpf_object_get_info_by_fd") + +func EbpfObjectGetInfoByFd(fd int, info unsafe.Pointer, info_size *uint32) (ObjectType, error) { + addr, err := ebpfObjectGetInfoByFdProc.Find() + if err != nil { + return 0, err + } + + var objectType ObjectType + err = errorResult(syscall.SyscallN(addr, + uintptr(fd), + uintptr(info), + uintptr(unsafe.Pointer(info_size)), + uintptr(unsafe.Pointer(&objectType)), + )) + return objectType, err +} + +// ebpf_result_t ebpf_object_unpin(_In_z_ const char* path) +var ebpfObjectUnpinProc = newProc("ebpf_object_unpin") + +func EbpfObjectUnpin(path string) error { + addr, err := ebpfObjectUnpinProc.Find() + if err != nil { + return err + } + + pathBytes, err := windows.ByteSliceFromString(path) + if err != nil { + return err + } + + return errorResult(syscall.SyscallN(addr, uintptr(unsafe.Pointer(&pathBytes[0])))) +} + +/* +Retrieve the next pinned object path. + + ebpf_result_t ebpf_get_next_pinned_object_path( + _In_opt_z_ const char* start_path, + _Out_writes_z_(next_path_len) char* next_path, + size_t next_path_len, + _Inout_opt_ ebpf_object_type_t* type) +*/ +var ebpfGetNextPinnedObjectPath = newProc("ebpf_get_next_pinned_object_path") + +func EbpfGetNextPinnedObjectPath(startPath string, objectType ObjectType) (string, ObjectType, error) { + addr, err := ebpfGetNextPinnedObjectPath.Find() + if err != nil { + return "", 0, err + } + + ptr, err := windows.BytePtrFromString(startPath) + if err != nil { + return "", 0, err + } + + tmp := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(ptr)), + uintptr(unsafe.Pointer(&tmp[0])), + uintptr(len(tmp)), + uintptr(unsafe.Pointer(&objectType)), + )) + return windows.ByteSliceToString(tmp), objectType, err +} + +/* +Canonicalize a path using filesystem canonicalization rules. 
+ + _Must_inspect_result_ ebpf_result_t + ebpf_canonicalize_pin_path(_Out_writes_(output_size) char* output, size_t output_size, _In_z_ const char* input) +*/ +var ebpfCanonicalizePinPath = newProc("ebpf_canonicalize_pin_path") + +func EbpfCanonicalizePinPath(input string) (string, error) { + addr, err := ebpfCanonicalizePinPath.Find() + if err != nil { + return "", err + } + + inputBytes, err := windows.ByteSliceFromString(input) + if err != nil { + return "", err + } + + output := make([]byte, _EBPF_MAX_PIN_PATH_LENGTH) + err = errorResult(syscall.SyscallN(addr, + uintptr(unsafe.Pointer(&output[0])), + uintptr(len(output)), + uintptr(unsafe.Pointer(&inputBytes[0])), + )) + return windows.ByteSliceToString(output), err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/proc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/proc.go new file mode 100644 index 000000000..81329905f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/proc.go @@ -0,0 +1,50 @@ +//go:build windows + +package efw + +import ( + "errors" + "fmt" + "syscall" + + "golang.org/x/sys/windows" +) + +/* +The BPF syscall wrapper which is ABI compatible with Linux. + + int bpf(int cmd, union bpf_attr* attr, unsigned int size) +*/ +var BPF = newProc("bpf") + +type proc struct { + proc *windows.LazyProc +} + +func newProc(name string) proc { + return proc{module.NewProc(name)} +} + +func (p proc) Find() (uintptr, error) { + if err := p.proc.Find(); err != nil { + if errors.Is(err, windows.ERROR_MOD_NOT_FOUND) { + return 0, fmt.Errorf("load %s: not found", module.Name) + } + return 0, err + } + return p.proc.Addr(), nil +} + +// uint32Result wraps a function which returns a uint32_t. +func uint32Result(r1, _ uintptr, _ syscall.Errno) uint32 { + return uint32(r1) +} + +// errorResult wraps a function which returns ebpf_result_t. +func errorResult(r1, _ uintptr, errNo syscall.Errno) error { + err := resultToError(Result(r1)) + if err != nil && errNo != 0 { + return fmt.Errorf("%w (errno: %v)", err, errNo) + } + return err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/program.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/program.go new file mode 100644 index 000000000..6202acf32 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/program.go @@ -0,0 +1,39 @@ +//go:build windows + +package efw + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +/* +Attach a program. 
+ + ebpf_result_t ebpf_program_attach_by_fds( + fd_t program_fd, + _In_opt_ const ebpf_attach_type_t* attach_type, + _In_reads_bytes_opt_(attach_parameters_size) void* attach_parameters, + size_t attach_parameters_size, + _Out_ fd_t* link) +*/ +var ebpfProgramAttachByFdsProc = newProc("ebpf_program_attach_by_fds") + +func EbpfProgramAttachFds(fd int, attachType windows.GUID, params unsafe.Pointer, params_size uintptr) (int, error) { + addr, err := ebpfProgramAttachByFdsProc.Find() + if err != nil { + return 0, err + } + + var link FD + err = errorResult(syscall.SyscallN(addr, + uintptr(fd), + uintptr(unsafe.Pointer(&attachType)), + uintptr(params), + params_size, + uintptr(unsafe.Pointer(&link)), + )) + return int(link), err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/result.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/result.go new file mode 100644 index 000000000..4c68da931 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/result.go @@ -0,0 +1,57 @@ +//go:build windows + +package efw + +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_result.h +type Result int32 + +//go:generate go tool stringer -tags windows -output result_string_windows.go -type=Result + +const ( + EBPF_SUCCESS Result = iota + EBPF_VERIFICATION_FAILED + EBPF_JIT_COMPILATION_FAILED + EBPF_PROGRAM_LOAD_FAILED + EBPF_INVALID_FD + EBPF_INVALID_OBJECT + EBPF_INVALID_ARGUMENT + EBPF_OBJECT_NOT_FOUND + EBPF_OBJECT_ALREADY_EXISTS + EBPF_FILE_NOT_FOUND + EBPF_ALREADY_PINNED + EBPF_NOT_PINNED + EBPF_NO_MEMORY + EBPF_PROGRAM_TOO_LARGE + EBPF_RPC_EXCEPTION + EBPF_ALREADY_INITIALIZED + EBPF_ELF_PARSING_FAILED + EBPF_FAILED + EBPF_OPERATION_NOT_SUPPORTED + EBPF_KEY_NOT_FOUND + EBPF_ACCESS_DENIED + EBPF_BLOCKED_BY_POLICY + EBPF_ARITHMETIC_OVERFLOW + EBPF_EXTENSION_FAILED_TO_LOAD + EBPF_INSUFFICIENT_BUFFER + EBPF_NO_MORE_KEYS + EBPF_KEY_ALREADY_EXISTS + EBPF_NO_MORE_TAIL_CALLS + EBPF_PENDING + EBPF_OUT_OF_SPACE + EBPF_CANCELED + EBPF_INVALID_POINTER + EBPF_TIMEOUT + EBPF_STALE_ID + EBPF_INVALID_STATE +) + +func (r Result) Error() string { + return r.String() +} + +func resultToError(res Result) error { + if res == EBPF_SUCCESS { + return nil + } + return res +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go new file mode 100644 index 000000000..1e55b5186 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/result_string_windows.go @@ -0,0 +1,57 @@ +// Code generated by "stringer -tags windows -output result_string_windows.go -type=Result"; DO NOT EDIT. + +package efw + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[EBPF_SUCCESS-0] + _ = x[EBPF_VERIFICATION_FAILED-1] + _ = x[EBPF_JIT_COMPILATION_FAILED-2] + _ = x[EBPF_PROGRAM_LOAD_FAILED-3] + _ = x[EBPF_INVALID_FD-4] + _ = x[EBPF_INVALID_OBJECT-5] + _ = x[EBPF_INVALID_ARGUMENT-6] + _ = x[EBPF_OBJECT_NOT_FOUND-7] + _ = x[EBPF_OBJECT_ALREADY_EXISTS-8] + _ = x[EBPF_FILE_NOT_FOUND-9] + _ = x[EBPF_ALREADY_PINNED-10] + _ = x[EBPF_NOT_PINNED-11] + _ = x[EBPF_NO_MEMORY-12] + _ = x[EBPF_PROGRAM_TOO_LARGE-13] + _ = x[EBPF_RPC_EXCEPTION-14] + _ = x[EBPF_ALREADY_INITIALIZED-15] + _ = x[EBPF_ELF_PARSING_FAILED-16] + _ = x[EBPF_FAILED-17] + _ = x[EBPF_OPERATION_NOT_SUPPORTED-18] + _ = x[EBPF_KEY_NOT_FOUND-19] + _ = x[EBPF_ACCESS_DENIED-20] + _ = x[EBPF_BLOCKED_BY_POLICY-21] + _ = x[EBPF_ARITHMETIC_OVERFLOW-22] + _ = x[EBPF_EXTENSION_FAILED_TO_LOAD-23] + _ = x[EBPF_INSUFFICIENT_BUFFER-24] + _ = x[EBPF_NO_MORE_KEYS-25] + _ = x[EBPF_KEY_ALREADY_EXISTS-26] + _ = x[EBPF_NO_MORE_TAIL_CALLS-27] + _ = x[EBPF_PENDING-28] + _ = x[EBPF_OUT_OF_SPACE-29] + _ = x[EBPF_CANCELED-30] + _ = x[EBPF_INVALID_POINTER-31] + _ = x[EBPF_TIMEOUT-32] + _ = x[EBPF_STALE_ID-33] + _ = x[EBPF_INVALID_STATE-34] +} + +const _Result_name = "EBPF_SUCCESSEBPF_VERIFICATION_FAILEDEBPF_JIT_COMPILATION_FAILEDEBPF_PROGRAM_LOAD_FAILEDEBPF_INVALID_FDEBPF_INVALID_OBJECTEBPF_INVALID_ARGUMENTEBPF_OBJECT_NOT_FOUNDEBPF_OBJECT_ALREADY_EXISTSEBPF_FILE_NOT_FOUNDEBPF_ALREADY_PINNEDEBPF_NOT_PINNEDEBPF_NO_MEMORYEBPF_PROGRAM_TOO_LARGEEBPF_RPC_EXCEPTIONEBPF_ALREADY_INITIALIZEDEBPF_ELF_PARSING_FAILEDEBPF_FAILEDEBPF_OPERATION_NOT_SUPPORTEDEBPF_KEY_NOT_FOUNDEBPF_ACCESS_DENIEDEBPF_BLOCKED_BY_POLICYEBPF_ARITHMETIC_OVERFLOWEBPF_EXTENSION_FAILED_TO_LOADEBPF_INSUFFICIENT_BUFFEREBPF_NO_MORE_KEYSEBPF_KEY_ALREADY_EXISTSEBPF_NO_MORE_TAIL_CALLSEBPF_PENDINGEBPF_OUT_OF_SPACEEBPF_CANCELEDEBPF_INVALID_POINTEREBPF_TIMEOUTEBPF_STALE_IDEBPF_INVALID_STATE" + +var _Result_index = [...]uint16{0, 12, 36, 63, 87, 102, 121, 142, 163, 189, 208, 227, 242, 256, 278, 296, 320, 343, 354, 382, 400, 418, 440, 464, 493, 517, 534, 557, 580, 592, 609, 622, 642, 654, 667, 685} + +func (i Result) String() string { + if i < 0 || i >= Result(len(_Result_index)-1) { + return "Result(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Result_name[_Result_index[i]:_Result_index[i+1]] +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/structs.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/structs.go new file mode 100644 index 000000000..558dbb865 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/efw/structs.go @@ -0,0 +1,36 @@ +//go:build windows + +package efw + +import "golang.org/x/sys/windows" + +// https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L366 +const _BPF_OBJ_NAME_LEN = 64 + +// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L372-L386 +type BpfMapInfo struct { + _ uint32 ///< Map ID. + _ uint32 ///< Type of map. + _ uint32 ///< Size in bytes of a map key. + _ uint32 ///< Size in bytes of a map value. + _ uint32 ///< Maximum number of entries allowed in the map. + Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name. + _ uint32 ///< Map flags. + + _ uint32 ///< ID of inner map template. + _ uint32 ///< Number of pinned paths. +} + +// See https://github.com/microsoft/ebpf-for-windows/blob/95267a53b26c68a94145d1731e2a4c8b546034c3/include/ebpf_structs.h#L396-L410 +type BpfProgInfo struct { + _ uint32 ///< Program ID. 
+ _ uint32 ///< Program type, if a cross-platform type. + _ uint32 ///< Number of maps associated with this program. + _ uintptr ///< Pointer to caller-allocated array to fill map IDs into. + Name [_BPF_OBJ_NAME_LEN]byte ///< Null-terminated map name. + + _ windows.GUID ///< Program type UUID. + _ windows.GUID ///< Attach type UUID. + _ uint32 ///< Number of pinned paths. + _ uint32 ///< Number of attached links. +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/elf.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/elf.go index 54a431313..011581938 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/elf.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/elf.go @@ -35,6 +35,29 @@ func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { return &SafeELFFile{file}, nil } +// OpenSafeELFFile reads an ELF from a file. +// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. +func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + // Symbols is the safe version of elf.File.Symbols. func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { defer func() { @@ -66,3 +89,14 @@ func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { syms, err = se.File.DynamicSymbols() return } + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian.go deleted file mode 100644 index 6ae99fcd5..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian.go +++ /dev/null @@ -1,29 +0,0 @@ -package internal - -import ( - "encoding/binary" - "unsafe" -) - -// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, -// depending on the host's endianness. -var NativeEndian binary.ByteOrder - -// Clang is set to either "el" or "eb" depending on the host's endianness. -var ClangEndian string - -func init() { - if isBigEndian() { - NativeEndian = binary.BigEndian - ClangEndian = "eb" - } else { - NativeEndian = binary.LittleEndian - ClangEndian = "el" - } -} - -func isBigEndian() (ret bool) { - i := int(0x1) - bs := (*[int(unsafe.Sizeof(i))]byte)(unsafe.Pointer(&i)) - return bs[0] == 0 -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian_be.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 000000000..a37777f21 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,9 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. 
+var NativeEndian = binary.BigEndian diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian_le.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 000000000..d833ea764 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,9 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 || wasm + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian = binary.LittleEndian diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/errors.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/errors.go index b9716cd61..19d5294ca 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/errors.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/errors.go @@ -1,41 +1,179 @@ package internal import ( - "errors" + "bytes" "fmt" + "io" "strings" - - "github.com/cilium/ebpf/internal/unix" ) -// ErrorWithLog returns an error that includes logs from the -// kernel verifier. +// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier +// log buffer. // -// logErr should be the error returned by the syscall that generated -// the log. It is used to check for truncation of the output. -func ErrorWithLog(err error, log []byte, logErr error) error { - logStr := strings.Trim(unix.ByteSliceToString(log), "\t\r\n ") - if errors.Is(logErr, unix.ENOSPC) { - logStr += " (truncated...)" +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. +func ErrorWithLog(source string, err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. + if i := bytes.IndexByte(log, 0); i != -1 { + log = log[:i] + } + + log = bytes.Trim(log, whitespace) + if len(log) == 0 { + return &VerifierError{source, err, nil} + } + + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. + lines = append(lines, string(bytes.TrimRight(line, whitespace))) } - return &VerifierError{err, logStr} + return &VerifierError{source, err, lines} } // VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. type VerifierError struct { - cause error - log string + source string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string } func (le *VerifierError) Unwrap() error { - return le.cause + return le.Cause } func (le *VerifierError) Error() string { - if le.log == "" { - return le.cause.Error() + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. + log = log[:n-1] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error()) + + n := len(log) + if n == 0 { + return b.String() + } + + lines := log[n-1:] + if n >= 2 && includePreviousLine(log[n-1]) { + // Add one more line of context if it aids understanding the error. 
+ lines = log[n-2:] + } + + for _, line := range lines { + b.WriteString(": ") + b.WriteString(strings.TrimSpace(line)) + } + + omitted := len(le.Log) - len(lines) + if omitted > 0 { + fmt.Fprintf(&b, " (%d line(s) omitted)", omitted) + } + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. + + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true + } + + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true } - return fmt.Sprintf("%s: %s", le.cause, le.log) + return false +} + +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). %v +// allows outputting additional information using the following flags: +// +// %+v: Output the first lines, or all lines if no width is given. +// %-v: Output the last lines, or all lines if no width is given. +// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. + lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) + } } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/feature.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/feature.go index c94a2e1ee..e27064c23 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/feature.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/feature.go @@ -3,15 +3,30 @@ package internal import ( "errors" "fmt" + "runtime" "sync" + + "github.com/cilium/ebpf/internal/platform" ) -// ErrNotSupported indicates that a feature is not supported by the current kernel. +// ErrNotSupported indicates that a feature is not supported. var ErrNotSupported = errors.New("not supported") +// ErrNotSupportedOnOS indicates that a feature is not supported on the current +// operating system. 
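The Format method above drives the %+v and %-v verbs that callers use to dump the verifier log. A minimal usage sketch follows; it is not part of the vendored diff and assumes the public ebpf.VerifierError alias that the library exposes for this internal type (imports: errors, fmt, github.com/cilium/ebpf).

    // loadOrExplain is illustrative only: on a failed load, print the first
    // 20 lines of the verifier log instead of the one-line error summary.
    func loadOrExplain(spec *ebpf.CollectionSpec) (*ebpf.Collection, error) {
        coll, err := ebpf.NewCollection(spec)
        if err != nil {
            var ve *ebpf.VerifierError
            if errors.As(err, &ve) {
                fmt.Printf("%+20v\n", ve) // use %-20v for the last 20 lines instead
            }
            return nil, err
        }
        return coll, nil
    }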
+var ErrNotSupportedOnOS = fmt.Errorf("%w on %s", ErrNotSupported, runtime.GOOS) + +// ErrRestrictedKernel is returned when kernel address information is restricted +// by kernel.kptr_restrict and/or net.core.bpf_jit_harden sysctls. +var ErrRestrictedKernel = errors.New("restricted by kernel.kptr_restrict and/or net.core.bpf_jit_harden sysctls") + // UnsupportedFeatureError is returned by FeatureTest() functions. type UnsupportedFeatureError struct { - // The minimum Linux mainline version required for this feature. + // The minimum version required for this feature. + // + // On Linux this refers to the mainline kernel version, on other platforms + // to the version of the runtime. + // // Used for the error string, and for sanity checking during testing. MinimumVersion Version @@ -31,10 +46,20 @@ func (ufe *UnsupportedFeatureError) Is(target error) bool { return target == ErrNotSupported } -type featureTest struct { - sync.RWMutex - successful bool - result error +// FeatureTest caches the result of a [FeatureTestFn]. +// +// Fields should not be modified after creation. +type FeatureTest struct { + // The name of the feature being detected. + Name string + // Version in the form Major.Minor[.Patch]. + Version string + // The feature test itself. + Fn FeatureTestFn + + mu sync.RWMutex + done bool + result error } // FeatureTestFn is used to determine whether the kernel supports @@ -42,59 +67,161 @@ type featureTest struct { // // The return values have the following semantics: // -// err == ErrNotSupported: the feature is not available -// err == nil: the feature is available -// err != nil: the test couldn't be executed +// err == ErrNotSupported: the feature is not available +// err == nil: the feature is available +// err != nil: the test couldn't be executed type FeatureTestFn func() error -// FeatureTest wraps a function so that it is run at most once. +// NewFeatureTest is a convenient way to create a single [FeatureTest]. // -// name should identify the tested feature, while version must be in the -// form Major.Minor[.Patch]. -// -// Returns an error wrapping ErrNotSupported if the feature is not supported. -func FeatureTest(name, version string, fn FeatureTestFn) func() error { - v, err := NewVersion(version) +// versions specifies in which version of a BPF runtime a feature appeared. +// The format is "GOOS:Major.Minor[.Patch]". GOOS may be omitted when targeting +// Linux. Returns [ErrNotSupportedOnOS] if there is no version specified for the +// current OS. +func NewFeatureTest(name string, fn FeatureTestFn, versions ...string) func() error { + version, err := platform.SelectVersion(versions) if err != nil { return func() error { return err } } - ft := new(featureTest) - return func() error { - ft.RLock() - if ft.successful { - defer ft.RUnlock() - return ft.result - } - ft.RUnlock() - ft.Lock() - defer ft.Unlock() - // check one more time on the off - // chance that two go routines - // were able to call into the write - // lock - if ft.successful { - return ft.result + if version == "" { + return func() error { + // We don't return an UnsupportedFeatureError here, since that will + // trigger version checks which don't make sense. 
+ return fmt.Errorf("%s: %w", name, ErrNotSupportedOnOS) } - err := fn() - switch { - case errors.Is(err, ErrNotSupported): - ft.result = &UnsupportedFeatureError{ - MinimumVersion: v, - Name: name, - } - fallthrough + } - case err == nil: - ft.successful = true + ft := &FeatureTest{ + Name: name, + Version: version, + Fn: fn, + } - default: - // We couldn't execute the feature test to a point - // where it could make a determination. - // Don't cache the result, just return it. - return fmt.Errorf("detect support for %s: %w", name, err) + return ft.execute +} + +// execute the feature test. +// +// The result is cached if the test is conclusive. +// +// See [FeatureTestFn] for the meaning of the returned error. +func (ft *FeatureTest) execute() error { + ft.mu.RLock() + result, done := ft.result, ft.done + ft.mu.RUnlock() + + if done { + return result + } + + ft.mu.Lock() + defer ft.mu.Unlock() + + // The test may have been executed by another caller while we were + // waiting to acquire ft.mu. + if ft.done { + return ft.result + } + + err := ft.Fn() + if err == nil { + ft.done = true + return nil + } + + if errors.Is(err, ErrNotSupported) { + var v Version + if ft.Version != "" { + v, err = NewVersion(ft.Version) + if err != nil { + return fmt.Errorf("feature %s: %w", ft.Name, err) + } + } + + ft.done = true + ft.result = &UnsupportedFeatureError{ + MinimumVersion: v, + Name: ft.Name, } return ft.result } + + // We couldn't execute the feature test to a point + // where it could make a determination. + // Don't cache the result, just return it. + return fmt.Errorf("detect support for %s: %w", ft.Name, err) +} + +// FeatureMatrix groups multiple related feature tests into a map. +// +// Useful when there is a small number of discrete features which are known +// at compile time. +// +// It must not be modified concurrently with calling [FeatureMatrix.Result]. +type FeatureMatrix[K comparable] map[K]*FeatureTest + +// Result returns the outcome of the feature test for the given key. +// +// It's safe to call this function concurrently. +// +// Always returns [ErrNotSupportedOnOS] on Windows. +func (fm FeatureMatrix[K]) Result(key K) error { + ft, ok := fm[key] + if !ok { + return fmt.Errorf("no feature probe for %v", key) + } + + if platform.IsWindows { + return fmt.Errorf("%s: %w", ft.Name, ErrNotSupportedOnOS) + } + + return ft.execute() +} + +// FeatureCache caches a potentially unlimited number of feature probes. +// +// Useful when there is a high cardinality for a feature test. +type FeatureCache[K comparable] struct { + mu sync.RWMutex + newTest func(K) *FeatureTest + features map[K]*FeatureTest +} + +func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] { + return &FeatureCache[K]{ + newTest: newTest, + features: make(map[K]*FeatureTest), + } +} + +func (fc *FeatureCache[K]) Result(key K) error { + if platform.IsWindows { + return fmt.Errorf("feature probe for %v: %w", key, ErrNotSupportedOnOS) + } + + // NB: Executing the feature test happens without fc.mu taken. 
+ return fc.retrieve(key).execute() +} + +func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest { + fc.mu.RLock() + ft := fc.features[key] + fc.mu.RUnlock() + + if ft != nil { + return ft + } + + fc.mu.Lock() + defer fc.mu.Unlock() + + if ft := fc.features[key]; ft != nil { + return ft + } + + ft = fc.newTest(key) + fc.features[key] = ft + return ft } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/io.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/io.go index 7177e596a..1eaf4775a 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/io.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/io.go @@ -2,10 +2,14 @@ package internal import ( "bufio" + "bytes" "compress/gzip" "errors" + "fmt" "io" "os" + "path/filepath" + "sync" ) // NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized @@ -18,7 +22,7 @@ import ( // end up being read completely anyway. // // Use instead of the r.Seek() + io.LimitReader() pattern. -func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) io.Reader { +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { // Clamp the size of the buffer to one page to avoid slurping large parts // of a file into memory. bufio.NewReader uses a hardcoded default buffer // of 4096. Allow arches with larger pages to allocate more, but don't @@ -60,3 +64,65 @@ func ReadAllCompressed(file string) ([]byte, error) { return io.ReadAll(gz) } + +// ReadUint64FromFile reads a uint64 from a file. +// +// format specifies the contents of the file in fmt.Scanf syntax. +func ReadUint64FromFile(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) + data, err := os.ReadFile(filename) + if err != nil { + return 0, fmt.Errorf("reading file %q: %w", filename, err) + } + + var value uint64 + n, err := fmt.Fscanf(bytes.NewReader(data), format, &value) + if err != nil { + return 0, fmt.Errorf("parsing file %q: %w", filename, err) + } + if n != 1 { + return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n) + } + + return value, nil +} + +type uint64FromFileKey struct { + format, path string +} + +var uint64FromFileCache = struct { + sync.RWMutex + values map[uint64FromFileKey]uint64 +}{ + values: map[uint64FromFileKey]uint64{}, +} + +// ReadUint64FromFileOnce is like readUint64FromFile but memoizes the result. +func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) + key := uint64FromFileKey{format, filename} + + uint64FromFileCache.RLock() + if value, ok := uint64FromFileCache.values[key]; ok { + uint64FromFileCache.RUnlock() + return value, nil + } + uint64FromFileCache.RUnlock() + + value, err := ReadUint64FromFile(format, filename) + if err != nil { + return 0, err + } + + uint64FromFileCache.Lock() + defer uint64FromFileCache.Unlock() + + if value, ok := uint64FromFileCache.values[key]; ok { + // Someone else got here before us, use what is cached. 
+ return value, nil + } + + uint64FromFileCache.values[key] = value + return value, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go new file mode 100644 index 000000000..b7f3e0b78 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go @@ -0,0 +1,20 @@ +package kallsyms + +import "sync" + +type cache[K, V comparable] struct { + m sync.Map +} + +func (c *cache[K, V]) Load(key K) (value V, _ bool) { + v, ok := c.m.Load(key) + if !ok { + return value, false + } + value = v.(V) + return value, true +} + +func (c *cache[K, V]) Store(key K, value V) { + c.m.Store(key, value) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go new file mode 100644 index 000000000..efc64a503 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go @@ -0,0 +1,161 @@ +package kallsyms + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "slices" + "strconv" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" +) + +var errAmbiguousKsym = errors.New("multiple kernel symbols with the same name") + +var symAddrs cache[string, uint64] + +// AssignAddresses looks up the addresses of the requested symbols in the kernel +// and assigns them to their corresponding values in the symbols map. Results +// of all lookups are cached, successful or otherwise. +// +// Any symbols missing in the kernel are ignored. Returns an error if multiple +// addresses were found for a symbol. +func AssignAddresses(symbols map[string]uint64) error { + if !platform.IsLinux { + return fmt.Errorf("read /proc/kallsyms: %w", internal.ErrNotSupportedOnOS) + } + + if len(symbols) == 0 { + return nil + } + + // Attempt to fetch symbols from cache. + request := make(map[string]uint64) + for name := range symbols { + if addr, ok := symAddrs.Load(name); ok { + symbols[name] = addr + continue + } + + // Mark the symbol to be read from /proc/kallsyms. + request[name] = 0 + } + if len(request) == 0 { + // All symbols satisfied from cache. + return nil + } + + f, err := os.Open("/proc/kallsyms") + if err != nil { + return err + } + defer f.Close() + + if err := assignAddresses(f, request); err != nil { + return fmt.Errorf("loading symbol addresses: %w", err) + } + + // Update the cache with the new symbols. Cache all requested symbols even if + // they weren't found, to avoid repeated lookups. + for name, addr := range request { + symAddrs.Store(name, addr) + symbols[name] = addr + } + + return nil +} + +// assignAddresses assigns kernel symbol addresses read from f to values +// requested by symbols. Always scans the whole input to make sure the user +// didn't request an ambiguous symbol. +func assignAddresses(f io.Reader, symbols map[string]uint64) error { + if len(symbols) == 0 { + return nil + } + r := newReader(f) + for r.Line() { + s, err, skip := parseSymbol(r, nil) + if err != nil { + return fmt.Errorf("parsing kallsyms line: %w", err) + } + if skip { + continue + } + + existing, requested := symbols[string(s.name)] + if existing != 0 { + // Multiple addresses for a symbol have been found. Return a friendly + // error to avoid silently attaching to the wrong symbol. libbpf also + // rejects referring to ambiguous symbols. 
+ return fmt.Errorf("symbol %s(0x%x): duplicate found at address 0x%x: %w", s.name, existing, s.addr, errAmbiguousKsym) + } + if requested { + // Reading a symbol with a zero address is a strong indication that + // kptr_restrict is set and the process doesn't have CAP_SYSLOG, or + // kptr_restrict is set to 2 (never show addresses). + // + // When running the kernel with KASLR disabled (like CI kernels running in + // microVMs), kallsyms will display many absolute symbols at address 0. + // This memory is unlikely to contain anything useful, and production + // machines are unlikely to run without KASLR. + // + // Return a helpful error instead of silently returning zero addresses. + if s.addr == 0 { + return fmt.Errorf("symbol %s: %w", s.name, internal.ErrRestrictedKernel) + } + symbols[string(s.name)] = s.addr + } + } + if err := r.Err(); err != nil { + return fmt.Errorf("reading kallsyms: %w", err) + } + + return nil +} + +type ksym struct { + addr uint64 + name []byte + mod []byte +} + +// parseSymbol parses a line from /proc/kallsyms into an address, type, name and +// module. Skip will be true if the symbol doesn't match any of the given symbol +// types. See `man 1 nm` for all available types. +// +// Only yields symbols whose type is contained in types. An empty value for types +// disables this filtering. +// +// Example line: `ffffffffc1682010 T nf_nat_init\t[nf_nat]` +func parseSymbol(r *reader, types []rune) (s ksym, err error, skip bool) { + for i := 0; r.Word(); i++ { + switch i { + // Address of the symbol. + case 0: + s.addr, err = strconv.ParseUint(r.Text(), 16, 64) + if err != nil { + return s, fmt.Errorf("parsing address: %w", err), false + } + // Type of the symbol. Assume the character is ASCII-encoded by converting + // it directly to a rune, since it's a fixed field controlled by the kernel. + case 1: + if len(types) > 0 && !slices.Contains(types, rune(r.Bytes()[0])) { + return s, nil, true + } + // Name of the symbol. + case 2: + s.name = r.Bytes() + // Kernel module the symbol is provided by. + case 3: + s.mod = bytes.Trim(r.Bytes(), "[]") + // Ignore any future fields. + default: + return + } + } + + return +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go new file mode 100644 index 000000000..3011e83f6 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go @@ -0,0 +1,89 @@ +package kallsyms + +import ( + "bufio" + "bytes" + "io" +) + +// reader is a line and word-oriented reader built for reading /proc/kallsyms. +// It takes an io.Reader and iterates its contents line by line, then word by +// word. +// +// It's designed to allow partial reading of lines without paying the cost of +// allocating objects that will never be accessed, resulting in less work for +// the garbage collector. +type reader struct { + s *bufio.Scanner + line []byte + word []byte + + err error +} + +func newReader(r io.Reader) *reader { + return &reader{ + s: bufio.NewScanner(r), + } +} + +// Bytes returns the current word as a byte slice. +func (r *reader) Bytes() []byte { + return r.word +} + +// Text returns the output of Bytes as a string. +func (r *reader) Text() string { + return string(r.Bytes()) +} + +// Line advances the reader to the next line in the input. Calling Line resets +// the current word, making [reader.Bytes] and [reader.Text] return empty +// values. Follow this up with a call to [reader.Word]. 
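A short sketch of how the kallsyms.AssignAddresses helper above is intended to be called; this is illustrative only, not part of the vendored code, and the symbol name is an arbitrary example.

    // Request addresses for one or more kernel symbols. Symbols missing from
    // /proc/kallsyms keep their zero value; ambiguous symbols return an error.
    syms := map[string]uint64{"bpf_trace_printk": 0}
    if err := kallsyms.AssignAddresses(syms); err != nil {
        return err
    }
    addr := syms["bpf_trace_printk"] // stays 0 if the symbol was not found
    _ = addr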
+// +// Like [bufio.Scanner], [reader.Err] needs to be checked after Line returns +// false to determine if an error occurred during reading. +// +// Returns true if Line can be called again. Returns false if all lines in the +// input have been read. +func (r *reader) Line() bool { + for r.s.Scan() { + line := r.s.Bytes() + if len(line) == 0 { + continue + } + + r.line = line + r.word = nil + + return true + } + if err := r.s.Err(); err != nil { + r.err = err + } + + return false +} + +// Word advances the reader to the next word in the current line. +// +// Returns true if a word is found and Word should be called again. Returns +// false when all words on the line have been read. +func (r *reader) Word() bool { + line := bytes.TrimSpace(r.line) + + if len(line) == 0 { + return false + } + + var found bool + r.word, r.line, found = bytes.Cut(line, []byte{' '}) + if !found { + r.word, r.line, _ = bytes.Cut(line, []byte{'\t'}) + } + return true +} + +func (r *reader) Err() error { + return r.err +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go new file mode 100644 index 000000000..29c62b626 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go @@ -0,0 +1,274 @@ +// Package kconfig implements a parser for the format of Linux's .config file. +package kconfig + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +// Parse parses the kconfig file for which a reader is given. +// All the CONFIG_* which are in filter and which are set set will be +// put in the returned map as key with their corresponding value as map value. +// If filter is nil, no filtering will occur. +// If the kconfig file is not valid, error will be returned. +func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) { + var r io.Reader + zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64)) + if err != nil { + r = io.NewSectionReader(source, 0, math.MaxInt64) + } else { + // Source is gzip compressed, transparently decompress. + r = zr + } + + ret := make(map[string]string, len(filter)) + + s := bufio.NewScanner(r) + + for s.Scan() { + line := s.Bytes() + err = processKconfigLine(line, ret, filter) + if err != nil { + return nil, fmt.Errorf("cannot parse line: %w", err) + } + + if filter != nil && len(ret) == len(filter) { + break + } + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("cannot parse: %w", err) + } + + if zr != nil { + return ret, zr.Close() + } + + return ret, nil +} + +// Golang translation of libbpf bpf_object__process_kconfig_line(): +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874 +// It does the same checks but does not put the data inside the BPF map. +func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error { + // Ignore empty lines and "# CONFIG_* is not set". + if !bytes.HasPrefix(line, []byte("CONFIG_")) { + return nil + } + + key, value, found := bytes.Cut(line, []byte{'='}) + if !found { + return fmt.Errorf("line %q does not contain separator '='", line) + } + + if len(value) == 0 { + return fmt.Errorf("line %q has no value", line) + } + + if filter != nil { + // NB: map[string(key)] gets special optimisation help from the compiler + // and doesn't allocate. Don't turn this into a variable. 
+ _, ok := filter[string(key)] + if !ok { + return nil + } + } + + // This can seem odd, but libbpf only sets the value the first time the key is + // met: + // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908 + _, ok := m[string(key)] + if !ok { + m[string(key)] = string(value) + } + + return nil +} + +// PutValue translates the value given as parameter depending on the BTF +// type, the translated value is then written to the byte array. +func PutValue(data []byte, typ btf.Type, value string) error { + typ = btf.UnderlyingType(typ) + + switch value { + case "y", "n", "m": + return putValueTri(data, typ, value) + } + + if strings.HasPrefix(value, `"`) { + return putValueString(data, typ, value) + } + + return putValueNumber(data, typ, value) +} + +// Golang translation of libbpf_tristate enum: +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169 +type triState int + +const ( + TriNo triState = 0 + TriYes triState = 1 + TriModule triState = 2 +) + +func putValueTri(data []byte, typ btf.Type, value string) error { + switch v := typ.(type) { + case *btf.Int: + if v.Encoding != btf.Bool { + return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding) + } + + if v.Size != 1 { + return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size) + } + + switch value { + case "y": + data[0] = 1 + case "n": + data[0] = 0 + default: + return fmt.Errorf("cannot use %q for btf.Bool", value) + } + case *btf.Enum: + if v.Name != "libbpf_tristate" { + return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name) + } + + if len(data) != 4 { + return fmt.Errorf("expected enum value to occupy 4 bytes in datasec, got: %d", len(data)) + } + + var tri triState + switch value { + case "y": + tri = TriYes + case "m": + tri = TriModule + case "n": + tri = TriNo + default: + return fmt.Errorf("value %q is not supported for libbpf_tristate", value) + } + + internal.NativeEndian.PutUint32(data, uint32(tri)) + default: + return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v) + } + + return nil +} + +func putValueString(data []byte, typ btf.Type, value string) error { + array, ok := typ.(*btf.Array) + if !ok { + return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array) + } + + contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int) + if !ok { + return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType) + } + + // Any Int, which is not bool, of one byte could be used to store char: + // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638 + if contentType.Size != 1 && contentType.Encoding != btf.Bool { + return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size) + } + + if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) { + return fmt.Errorf(`value %q must start and finish with '"'`, value) + } + + str := strings.Trim(value, `"`) + + // We need to trim string if the bpf array is smaller. + if uint32(len(str)) >= array.Nelems { + str = str[:array.Nelems] + } + + // Write the string content to .kconfig. 
+ copy(data, str) + + return nil +} + +func putValueNumber(data []byte, typ btf.Type, value string) error { + integer, ok := typ.(*btf.Int) + if !ok { + return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer) + } + + size := integer.Size + sizeInBits := size * 8 + + var n uint64 + var err error + if integer.Encoding == btf.Signed { + parsed, e := strconv.ParseInt(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } else { + parsed, e := strconv.ParseUint(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } + + if err != nil { + return fmt.Errorf("cannot parse value: %w", err) + } + + return PutInteger(data, integer, n) +} + +// PutInteger writes n into data. +// +// integer determines how much is written into data and what the valid values +// are. +func PutInteger(data []byte, integer *btf.Int, n uint64) error { + // This function should match set_kcfg_value_num in libbpf. + if integer.Encoding == btf.Bool && n > 1 { + return fmt.Errorf("invalid boolean value: %d", n) + } + + if len(data) < int(integer.Size) { + return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data)) + } + + switch integer.Size { + case 1: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + data[0] = byte(n) + case 2: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint16(data, uint16(n)) + case 4: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint32(data, uint32(n)) + case 8: + internal.NativeEndian.PutUint64(data, uint64(n)) + default: + return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size) + } + + return nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/auxv.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/auxv.go new file mode 100644 index 000000000..a864d6b4a --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/auxv.go @@ -0,0 +1,63 @@ +package linux + +import ( + "fmt" + "io" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" +) + +type auxvPairReader interface { + Close() error + ReadAuxvPair() (uint64, uint64, error) +} + +// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h +const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image +) + +type auxvRuntimeReader struct { + data [][2]uintptr + index int +} + +func (r *auxvRuntimeReader) Close() error { + return nil +} + +func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) { + if r.index >= len(r.data)+2 { + return 0, 0, io.EOF + } + + // we manually add the (_AT_NULL, _AT_NULL) pair at the end + // that is not provided by the go runtime + var tag, value uintptr + if r.index < len(r.data) { + tag, value = r.data[r.index][0], r.data[r.index][1] + } else { + tag, value = _AT_NULL, _AT_NULL + } + r.index += 1 + return uint64(tag), uint64(value), nil +} + +func newAuxvRuntimeReader() (auxvPairReader, error) { + if !platform.IsLinux { + 
return nil, fmt.Errorf("read auxv from runtime: %w", internal.ErrNotSupportedOnOS) + } + + data, err := unix.Auxv() + if err != nil { + return nil, fmt.Errorf("read auxv from runtime: %w", err) + } + + return &auxvRuntimeReader{ + data: data, + index: 0, + }, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/cpu.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/cpu.go similarity index 68% rename from src/nvcgo/vendor/github.com/cilium/ebpf/internal/cpu.go rename to src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/cpu.go index 3affa1efb..bd55ac915 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/cpu.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/cpu.go @@ -1,29 +1,12 @@ -package internal +package linux import ( "fmt" "os" "strings" - "sync" ) -var sysCPU struct { - once sync.Once - err error - num int -} - -// PossibleCPUs returns the max number of CPUs a system may possibly have -// Logical CPU numbers must be of the form 0-n -func PossibleCPUs() (int, error) { - sysCPU.once.Do(func() { - sysCPU.num, sysCPU.err = parseCPUsFromFile("/sys/devices/system/cpu/possible") - }) - - return sysCPU.num, sysCPU.err -} - -func parseCPUsFromFile(path string) (int, error) { +func ParseCPUsFromFile(path string) (int, error) { spec, err := os.ReadFile(path) if err != nil { return 0, err diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/doc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/doc.go new file mode 100644 index 000000000..064e75437 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/doc.go @@ -0,0 +1,2 @@ +// Package linux contains OS specific wrappers around package unix. +package linux diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go new file mode 100644 index 000000000..1488ecb35 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go @@ -0,0 +1,31 @@ +package linux + +import ( + "fmt" + "os" +) + +// FindKConfig searches for a kconfig file on the host. +// +// It first reads from /boot/config- of the current running kernel and tries +// /proc/config.gz if nothing was found in /boot. +// If none of the file provide a kconfig, it returns an error. +func FindKConfig() (*os.File, error) { + kernelRelease, err := KernelRelease() + if err != nil { + return nil, fmt.Errorf("cannot get kernel release: %w", err) + } + + path := "/boot/config-" + kernelRelease + f, err := os.Open(path) + if err == nil { + return f, nil + } + + f, err = os.Open("/proc/config.gz") + if err == nil { + return f, nil + } + + return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/platform.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/platform.go new file mode 100644 index 000000000..39bdcc51f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/platform.go @@ -0,0 +1,43 @@ +package linux + +import ( + "runtime" +) + +// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by +// the linux kernel. 
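Taken together, FindKConfig above and kconfig.Parse from earlier in this diff are typically combined as below. This is a hedged sketch, not part of the vendored code; CONFIG_BPF is just an example key.

    // Illustrative only: look up CONFIG_BPF in the running kernel's config.
    f, err := linux.FindKConfig()
    if err != nil {
        return err
    }
    defer f.Close()

    values, err := kconfig.Parse(f, map[string]struct{}{"CONFIG_BPF": {}})
    if err != nil {
        return err
    }
    _ = values["CONFIG_BPF"] // "y" when built in; key absent when not set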
+// +// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go +// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047 +func PlatformPrefix() string { + switch runtime.GOARCH { + case "386": + return "__ia32_" + case "amd64", "amd64p32": + return "__x64_" + + case "arm", "armbe": + return "__arm_" + case "arm64", "arm64be": + return "__arm64_" + + case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le": + return "__mips_" + + case "s390": + return "__s390_" + case "s390x": + return "__s390x_" + + case "riscv", "riscv64": + return "__riscv_" + + case "ppc": + return "__powerpc_" + case "ppc64", "ppc64le": + return "__powerpc64_" + + default: + return "" + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/statfs.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/statfs.go new file mode 100644 index 000000000..e268c06fa --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/statfs.go @@ -0,0 +1,23 @@ +package linux + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +func FSType(path string) (int64, error) { + var statfs unix.Statfs_t + if err := unix.Statfs(path, &statfs); err != nil { + return 0, err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. + fsType = int64(uint32(statfs.Type)) + } + return fsType, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/vdso.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/vdso.go new file mode 100644 index 000000000..1d8d0ef6b --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/vdso.go @@ -0,0 +1,144 @@ +package linux + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + av, err := newAuxvRuntimeReader() + if err != nil { + return 0, err + } + + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. +func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) { + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). 
+ for { + tag, value, err := r.ReadAuxvPair() + if err != nil { + return 0, err + } + + switch tag { + case _AT_SYSINFO_EHDR: + if value != 0 { + return uintptr(value), nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := internal.NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, internal.Align(n.NameSize, 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. + name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. + if _, err := io.CopyN(io.Discard, sr, int64(internal.Align(n.DescSize, 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/version.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/version.go new file mode 100644 index 000000000..798dd3fed --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/linux/version.go @@ -0,0 +1,34 @@ +package linux + +import ( + "fmt" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// KernelVersion returns the version of the currently running kernel. +var KernelVersion = sync.OnceValues(detectKernelVersion) + +// detectKernelVersion returns the version of the running kernel. +func detectKernelVersion() (internal.Version, error) { + vc, err := vdsoVersion() + if err != nil { + return internal.Version{}, err + } + return internal.NewVersionFromCode(vc), nil +} + +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. 
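+//
+// The value is taken from the release field reported by uname(2).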
+func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) + } + + return unix.ByteSliceToString(uname.Release[:]), nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/math.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/math.go new file mode 100644 index 000000000..10cde6686 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/math.go @@ -0,0 +1,33 @@ +package internal + +// Align returns 'n' updated to 'alignment' boundary. +func Align[I Integer](n, alignment I) I { + return (n + alignment - 1) / alignment * alignment +} + +// IsPow returns true if n is a power of two. +func IsPow[I Integer](n I) bool { + return n != 0 && (n&(n-1)) == 0 +} + +// Between returns the value clamped between a and b. +func Between[I Integer](val, a, b I) I { + lower, upper := a, b + if lower > upper { + upper, lower = a, b + } + + val = min(val, upper) + return max(val, lower) +} + +// Integer represents all possible integer types. +// Remove when x/exp/constraints is moved to the standard library. +type Integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// List of integer types known by the Go compiler. Used by TestIntegerConstraint +// to warn if a new integer type is introduced. Remove when x/exp/constraints +// is moved to the standard library. +var integers = []string{"int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16", "uint32", "uint64", "uintptr"} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/output.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/output.go index aeab37fcf..bcbb6818d 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/output.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/output.go @@ -6,6 +6,7 @@ import ( "go/format" "go/scanner" "io" + "reflect" "strings" "unicode" ) @@ -82,3 +83,20 @@ func WriteFormatted(src []byte, out io.Writer) error { return nel } + +// GoTypeName is like %T, but elides the package name. +// +// Pointers to a type are peeled off. +func GoTypeName(t any) string { + rT := reflect.TypeOf(t) + for rT.Kind() == reflect.Pointer { + rT = rT.Elem() + } + + name := rT.Name() + if pkgPath := rT.PkgPath(); pkgPath != "" { + name = strings.ReplaceAll(name, pkgPath+".", "") + } + + return name +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/constants.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/constants.go new file mode 100644 index 000000000..b57ae1e59 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/constants.go @@ -0,0 +1,62 @@ +package platform + +import "fmt" + +// Values used to tag platform specific constants. +// +// The value for Linux is zero so that existing constants do not change. 
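+//
+// A tagged constant keeps the value in the low 28 bits and the platform tag in
+// the bits above them, e.g. WindowsTag|7 decodes back into ("windows", 7).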
+const ( + LinuxTag = uint32(iota) << platformShift + WindowsTag +) + +const ( + platformMax = 1<<3 - 1 // most not exceed 3 bits to avoid setting the high bit + platformShift = 28 + platformMask = platformMax << platformShift +) + +func tagForPlatform(platform string) (uint32, error) { + switch platform { + case Linux: + return LinuxTag, nil + case Windows: + return WindowsTag, nil + default: + return 0, fmt.Errorf("unrecognized platform: %s", platform) + } +} + +func platformForConstant(c uint32) string { + tag := uint32(c & platformMask) + switch tag { + case LinuxTag: + return Linux + case WindowsTag: + return Windows + default: + return "" + } +} + +// Encode a platform and a value into a tagged constant. +// +// Returns an error if platform is unknown or c is out of bounds. +func EncodeConstant[T ~uint32](platform string, c uint32) (T, error) { + if c>>platformShift > 0 { + return 0, fmt.Errorf("invalid constant 0x%x", c) + } + + tag, err := tagForPlatform(platform) + if err != nil { + return 0, err + } + + return T(tag | c), nil +} + +// Decode a platform and a value from a tagged constant. +func DecodeConstant[T ~uint32](c T) (string, uint32) { + v := uint32(c) & ^uint32(platformMask) + return platformForConstant(uint32(c)), v +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform.go new file mode 100644 index 000000000..1c5bad396 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform.go @@ -0,0 +1,42 @@ +package platform + +import ( + "errors" + "runtime" + "strings" +) + +const ( + Linux = "linux" + Windows = "windows" +) + +const ( + IsLinux = runtime.GOOS == "linux" + IsWindows = runtime.GOOS == "windows" +) + +// SelectVersion extracts the platform-appropriate version from a list of strings like +// `linux:6.1` or `windows:0.20.0`. +// +// Returns an empty string and nil if no version matched or an error if no strings were passed. +func SelectVersion(versions []string) (string, error) { + const prefix = runtime.GOOS + ":" + + if len(versions) == 0 { + return "", errors.New("no versions specified") + } + + for _, version := range versions { + if after, ok := strings.CutPrefix(version, prefix); ok { + return after, nil + } + + if IsLinux && !strings.ContainsRune(version, ':') { + // Allow version numbers without a GOOS prefix on Linux. 
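+			// For example, a bare "6.1" is treated the same as "linux:6.1".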
+ return version, nil + } + } + + return "", nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go new file mode 100644 index 000000000..f0aa240dc --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_linux.go @@ -0,0 +1,3 @@ +package platform + +const Native = Linux diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go new file mode 100644 index 000000000..cd33b3f68 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_other.go @@ -0,0 +1,5 @@ +//go:build !linux && !windows + +package platform + +const Native = "" diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go new file mode 100644 index 000000000..26b4a8ecb --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/platform/platform_windows.go @@ -0,0 +1,3 @@ +package platform + +const Native = Windows diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/prog.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/prog.go new file mode 100644 index 000000000..d629145b6 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/prog.go @@ -0,0 +1,11 @@ +package internal + +// EmptyBPFContext is the smallest-possible BPF input context to be used for +// invoking `Program.{Run,Benchmark,Test}`. +// +// Programs require a context input buffer of at least 15 bytes. Looking in +// net/bpf/test_run.c, bpf_test_init() requires that the input is at least +// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets +// with invalid pkt_len"), it also requires the skb to be non-empty after +// removing the Layer 2 header. +var EmptyBPFContext = make([]byte, 15) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/doc.go index 4b7245e2d..75d7e4013 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/doc.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -1,4 +1,6 @@ // Package sys contains bindings for the BPF syscall. package sys -//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../btf/testdata/vmlinux-btf.gz +// Regenerate types.go by invoking go generate in the current directory. + +//go:generate go tool gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd.go index 65517d45e..f12d11c20 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -1,45 +1,32 @@ package sys import ( - "fmt" "math" - "os" "runtime" "strconv" + "github.com/cilium/ebpf/internal/testutils/testmain" "github.com/cilium/ebpf/internal/unix" ) var ErrClosedFd = unix.EBADF -type FD struct { - raw int -} - -func newFD(value int) *FD { - fd := &FD{value} - runtime.SetFinalizer(fd, (*FD).Close) - return fd -} - -// NewFD wraps a raw fd with a finalizer. +// A value for an invalid fd. // -// You must not use the raw fd after calling this function, since the underlying -// file descriptor number may change. This is because the BPF UAPI assumes that -// zero is not a valid fd value. 
-func NewFD(value int) (*FD, error) { - if value < 0 { - return nil, fmt.Errorf("invalid fd %d", value) - } +// Luckily this is consistent across Linux and Windows. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/54632eb360c560ebef2f173be1a4a4625d540744/include/ebpf_api.h#L25 +const invalidFd = -1 - fd := newFD(value) - if value != 0 { - return fd, nil - } +func newFD(value int) *FD { + testmain.TraceFD(value, 1) - dup, err := fd.Dup() - _ = fd.Close() - return dup, err + fd := &FD{raw: value} + fd.cleanup = runtime.AddCleanup(fd, func(raw int) { + testmain.LeakFD(raw) + _ = unix.Close(raw) + }, fd.raw) + return fd } func (fd *FD) String() string { @@ -47,11 +34,11 @@ func (fd *FD) String() string { } func (fd *FD) Int() int { - return fd.raw + return int(fd.raw) } func (fd *FD) Uint() uint32 { - if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + if fd.raw == invalidFd { // Best effort: this is the number most likely to be an invalid file // descriptor. It is equal to -1 (on two's complement arches). return math.MaxUint32 @@ -59,38 +46,14 @@ func (fd *FD) Uint() uint32 { return uint32(fd.raw) } -func (fd *FD) Close() error { - if fd.raw < 0 { - return nil - } - - value := int(fd.raw) - fd.raw = -1 - - fd.Forget() - return unix.Close(value) -} - -func (fd *FD) Forget() { - runtime.SetFinalizer(fd, nil) -} - -func (fd *FD) Dup() (*FD, error) { - if fd.raw < 0 { - return nil, ErrClosedFd - } - - // Always require the fd to be larger than zero: the BPF API treats the value - // as "no argument provided". - dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) - if err != nil { - return nil, fmt.Errorf("can't dup fd: %v", err) - } - - return newFD(dup), nil -} +// Disown destroys the FD and returns its raw file descriptor without closing +// it. After this call, the underlying fd is no longer tied to the FD's +// lifecycle. +func (fd *FD) Disown() int { + value := fd.raw + testmain.ForgetFD(value) + fd.raw = invalidFd -func (fd *FD) File(name string) *os.File { - fd.Forget() - return os.NewFile(uintptr(fd.raw), name) + fd.cleanup.Stop() + return value } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go new file mode 100644 index 000000000..2a6423a59 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd_other.go @@ -0,0 +1,72 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "os" + "runtime" + + "github.com/cilium/ebpf/internal/unix" +) + +type FD struct { + raw int + cleanup runtime.Cleanup +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + return unix.Close(fd.Disown()) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". 
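+	// F_DUPFD_CLOEXEC duplicates the fd onto the lowest free descriptor number
+	// greater than or equal to 1 and sets the close-on-exec flag on it.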
+ dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +// File takes ownership of FD and turns it into an [*os.File]. +// +// You must not use the FD after the call returns. +// +// Returns [ErrClosedFd] if the fd is not valid. +func (fd *FD) File(name string) (*os.File, error) { + if fd.raw == invalidFd { + return nil, ErrClosedFd + } + + return os.NewFile(uintptr(fd.Disown()), name), nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go new file mode 100644 index 000000000..1291c763f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/fd_windows.go @@ -0,0 +1,60 @@ +package sys + +import ( + "fmt" + "os" + "runtime" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" +) + +// FD wraps a handle which is managed by the eBPF for Windows runtime. +// +// It is not equivalent to a real file descriptor or handle. +type FD struct { + raw int + cleanup runtime.Cleanup +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function. +func NewFD(value int) (*FD, error) { + if value == invalidFd { + return nil, fmt.Errorf("invalid fd %d", value) + } + + if value == 0 { + // The efW runtime never uses zero fd it seems. No need to dup it. + return nil, fmt.Errorf("invalid zero fd") + } + + return newFD(value), nil +} + +func (fd *FD) Close() error { + if fd.raw == invalidFd { + return nil + } + + return efw.EbpfCloseFd(fd.Disown()) +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw == invalidFd { + return nil, ErrClosedFd + } + + dup, err := efw.EbpfDuplicateFd(fd.raw) + if err != nil { + return nil, err + } + + return NewFD(int(dup)) +} + +// File is not implemented. 
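+// It always returns an error wrapping internal.ErrNotSupportedOnOS.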
+func (fd *FD) File(name string) (*os.File, error) { + return nil, fmt.Errorf("file from fd: %w", internal.ErrNotSupportedOnOS) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/pinning.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go similarity index 64% rename from src/nvcgo/vendor/github.com/cilium/ebpf/internal/pinning.go rename to src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go index 9fa3146c7..96ad43abd 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/pinning.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/pinning_other.go @@ -1,4 +1,6 @@ -package internal +//go:build !windows + +package sys import ( "errors" @@ -7,13 +9,11 @@ import ( "path/filepath" "runtime" - "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/linux" "github.com/cilium/ebpf/internal/unix" ) -func Pin(currentPath, newPath string, fd *sys.FD) error { - const bpfFSType = 0xcafe4a11 - +func Pin(currentPath, newPath string, fd *FD) error { if newPath == "" { return errors.New("given pinning path cannot be empty") } @@ -21,25 +21,26 @@ func Pin(currentPath, newPath string, fd *sys.FD) error { return nil } - var statfs unix.Statfs_t - if err := unix.Statfs(filepath.Dir(newPath), &statfs); err != nil { + fsType, err := linux.FSType(filepath.Dir(newPath)) + if err != nil { return err - } else if uint64(statfs.Type) != bpfFSType { + } + if fsType != unix.BPF_FS_MAGIC { return fmt.Errorf("%s is not on a bpf filesystem", newPath) } defer runtime.KeepAlive(fd) if currentPath == "" { - return sys.ObjPin(&sys.ObjPinAttr{ - Pathname: sys.NewStringPointer(newPath), + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), BpfFd: fd.Uint(), }) } // Renameat2 is used instead of os.Rename to disallow the new path replacing // an existing path. - err := unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) if err == nil { // Object is now moved to the new pinning path. return nil @@ -48,8 +49,8 @@ func Pin(currentPath, newPath string, fd *sys.FD) error { return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) } // Internal state not in sync with the file system so let's fix it. 
- return sys.ObjPin(&sys.ObjPinAttr{ - Pathname: sys.NewStringPointer(newPath), + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), BpfFd: fd.Uint(), }) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go new file mode 100644 index 000000000..c8ab68550 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/pinning_windows.go @@ -0,0 +1,44 @@ +package sys + +import ( + "errors" + "runtime" + + "github.com/cilium/ebpf/internal/efw" +) + +func Pin(currentPath, newPath string, fd *FD) error { + defer runtime.KeepAlive(fd) + + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + if currentPath == "" { + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + + err := efw.EbpfObjectUnpin(pinnedPath) + if err != nil && !errors.Is(err, efw.EBPF_KEY_NOT_FOUND) { + return err + } + + return nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr.go index a22100688..aa6c2e91a 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -6,33 +6,69 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -// NewPointer creates a 64-bit pointer from an unsafe Pointer. -func NewPointer(ptr unsafe.Pointer) Pointer { +// UnsafePointer creates a 64-bit pointer from an unsafe Pointer. +func UnsafePointer(ptr unsafe.Pointer) Pointer { return Pointer{ptr: ptr} } -// NewSlicePointer creates a 64-bit pointer from a byte slice. -func NewSlicePointer(buf []byte) Pointer { +// UnsafeSlicePointer creates an untyped [Pointer] from a slice. +func UnsafeSlicePointer[T comparable](buf []T) Pointer { if len(buf) == 0 { return Pointer{} } - return Pointer{ptr: unsafe.Pointer(&buf[0])} + return Pointer{ptr: unsafe.Pointer(unsafe.SliceData(buf))} } -// NewSlicePointer creates a 64-bit pointer from a byte slice. +// TypedPointer points to typed memory. // -// Useful to assign both the pointer and the length in one go. -func NewSlicePointerLen(buf []byte) (Pointer, uint32) { - return NewSlicePointer(buf), uint32(len(buf)) +// It is like a *T except that it accounts for the BPF syscall interface. +type TypedPointer[T any] struct { + _ [0]*T // prevent TypedPointer[a] to be convertible to TypedPointer[b] + ptr Pointer } -// NewStringPointer creates a 64-bit pointer from a string. -func NewStringPointer(str string) Pointer { - p, err := unix.BytePtrFromString(str) +func (p TypedPointer[T]) IsNil() bool { + return p.ptr.ptr == nil +} + +// SlicePointer creates a [TypedPointer] from a slice. +func SlicePointer[T comparable](s []T) TypedPointer[T] { + return TypedPointer[T]{ptr: UnsafeSlicePointer(s)} +} + +// StringPointer points to a null-terminated string. +type StringPointer struct { + _ [0]string + ptr Pointer +} + +// NewStringPointer creates a [StringPointer] from a string. 
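+//
+// The pointer refers to a NUL-terminated copy of str; if str itself contains a
+// NUL byte, the zero StringPointer is returned instead.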
+func NewStringPointer(str string) StringPointer { + slice, err := unix.ByteSliceFromString(str) if err != nil { - return Pointer{} + return StringPointer{} + } + + return StringPointer{ptr: Pointer{ptr: unsafe.Pointer(&slice[0])}} +} + +// StringSlicePointer points to a slice of [StringPointer]. +type StringSlicePointer struct { + _ [0][]string + ptr Pointer +} + +// NewStringSlicePointer allocates an array of Pointers to each string in the +// given slice of strings and returns a 64-bit pointer to the start of the +// resulting array. +// +// Use this function to pass arrays of strings as syscall arguments. +func NewStringSlicePointer(strings []string) StringSlicePointer { + sp := make([]StringPointer, 0, len(strings)) + for _, s := range strings { + sp = append(sp, NewStringPointer(s)) } - return Pointer{ptr: unsafe.Pointer(p)} + return StringSlicePointer{ptr: Pointer{ptr: unsafe.Pointer(&sp[0])}} } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go index df903d780..0b0feeb7a 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -1,15 +1,16 @@ //go:build armbe || mips || mips64p32 -// +build armbe mips mips64p32 package sys import ( + "structs" "unsafe" ) // Pointer wraps an unsafe.Pointer to be 64bit to // conform to the syscall specification. type Pointer struct { + structs.HostLayout pad uint32 ptr unsafe.Pointer } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go index a6a51edb6..f9007fe84 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -1,15 +1,16 @@ //go:build 386 || amd64p32 || arm || mipsle || mips64p32le -// +build 386 amd64p32 arm mipsle mips64p32le package sys import ( + "structs" "unsafe" ) // Pointer wraps an unsafe.Pointer to be 64bit to // conform to the syscall specification. type Pointer struct { + structs.HostLayout ptr unsafe.Pointer pad uint32 } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go index 7c0279e48..05196cca7 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -1,14 +1,15 @@ //go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 -// +build !386,!amd64p32,!arm,!mipsle,!mips64p32le,!armbe,!mips,!mips64p32 package sys import ( + "structs" "unsafe" ) // Pointer wraps an unsafe.Pointer to be 64bit to // conform to the syscall specification. type Pointer struct { + structs.HostLayout ptr unsafe.Pointer } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/signals.go new file mode 100644 index 000000000..e75e96052 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/signals.go @@ -0,0 +1,85 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// A sigset containing only SIGPROF. +var profSet unix.Sigset_t + +func init() { + // See sigsetAdd for details on the implementation. Open coded here so + // that the compiler will check the constant calculations for us. 
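+	// On amd64, where SIGPROF is 27 and sigset words are 64 bits wide, this
+	// sets bit 26 of Val[0].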
+ profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits) +} + +// maskProfilerSignal locks the calling goroutine to its underlying OS thread +// and adds SIGPROF to the thread's signal mask. This prevents pprof from +// interrupting expensive syscalls like e.g. BPF_PROG_LOAD. +// +// The caller must defer unmaskProfilerSignal() to reverse the operation. +func maskProfilerSignal() { + runtime.LockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil { + runtime.UnlockOSThread() + panic(fmt.Errorf("masking profiler signal: %w", err)) + } +} + +// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal +// mask, allowing it to be interrupted for profiling once again. +// +// It also unlocks the current goroutine from its underlying OS thread. +func unmaskProfilerSignal() { + defer runtime.UnlockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil { + panic(fmt.Errorf("unmasking profiler signal: %w", err)) + } +} + +const ( + // Signal is the nth bit in the bitfield. + sigprofBit = int(unix.SIGPROF - 1) + // The number of bits in one Sigset_t word. + wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8 +) + +// sigsetAdd adds signal to set. +// +// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch. +// This function must be able to deal with both and so must avoid any direct +// references to u32 or u64 types. +func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error { + if signal < 1 { + return fmt.Errorf("signal %d must be larger than 0", signal) + } + + // For amd64, runtime.sigaddset() performs the following operation: + // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31) + // + // This trick depends on sigset being two u32's, causing a signal in the + // bottom 31 bits to be written to the low word if bit 32 is low, or the high + // word if bit 32 is high. + + // Signal is the nth bit in the bitfield. + bit := int(signal - 1) + // Word within the sigset the bit needs to be written to. + word := bit / wordBits + + if word >= len(set.Val) { + return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal) + } + + // Write the signal bit into its corresponding word at the corrected offset. + set.Val[word] |= 1 << (bit % wordBits) + + return nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall.go index dd515f0eb..f2fffd26b 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -2,41 +2,22 @@ package sys import ( "runtime" - "syscall" "unsafe" "github.com/cilium/ebpf/internal/unix" ) -// BPF wraps SYS_BPF. +// ENOTSUPP is a Linux internal error code that has leaked into UAPI. // -// Any pointers contained in attr must use the Pointer type from this package. -func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { - for { - r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) - runtime.KeepAlive(attr) - - // As of ~4.20 the verifier can be interrupted by a signal, - // and returns EAGAIN in that case. - if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { - continue - } - - var err error - if errNo != 0 { - err = wrappedErrno{errNo} - } - - return r1, err - } -} +// It is not the same as ENOTSUP or EOPNOTSUPP. +const ENOTSUPP = unix.Errno(524) // Info is implemented by all structs that can be passed to the ObjInfo syscall. 
// -// MapInfo -// ProgInfo -// LinkInfo -// BtfInfo +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo type Info interface { info() (unsafe.Pointer, uint32) } @@ -59,12 +40,52 @@ func (i *LinkInfo) info() (unsafe.Pointer, uint32) { return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) } +func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + var _ Info = (*BtfInfo)(nil) func (i *BtfInfo) info() (unsafe.Pointer, uint32) { return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) } +func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + // ObjInfo retrieves information about a BPF Fd. // // info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. @@ -73,7 +94,7 @@ func ObjInfo(fd *FD, info Info) error { err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ BpfFd: fd.Uint(), InfoLen: len, - Info: NewPointer(ptr), + Info: UnsafePointer(ptr), }) runtime.KeepAlive(fd) return err @@ -81,36 +102,83 @@ func ObjInfo(fd *FD, info Info) error { // BPFObjName is a null-terminated string made up of // 'A-Za-z0-9_' characters. -type ObjName [unix.BPF_OBJ_NAME_LEN]byte +type ObjName [BPF_OBJ_NAME_LEN]byte // NewObjName truncates the result if it is too long. func NewObjName(name string) ObjName { var result ObjName - copy(result[:unix.BPF_OBJ_NAME_LEN-1], name) + copy(result[:BPF_OBJ_NAME_LEN-1], name) return result } +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +type LogLevel uint32 + +const ( + BPF_LOG_LEVEL1 LogLevel = 1 << iota + BPF_LOG_LEVEL2 + BPF_LOG_STATS +) + +// MapID uniquely identifies a bpf_map. +type MapID uint32 + +// ProgramID uniquely identifies a bpf_map. +type ProgramID uint32 + // LinkID uniquely identifies a bpf_link. type LinkID uint32 -// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// BTFID uniquely identifies a BTF blob loaded into the kernel. +type BTFID uint32 + +// TypeID identifies a type in a BTF blob. +type TypeID uint32 + +// Flags used by bpf_mprog. +const ( + BPF_F_REPLACE = 1 << (iota + 2) + BPF_F_BEFORE + BPF_F_AFTER + BPF_F_ID + BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK +) + +// Flags used by BPF_PROG_LOAD. +const ( + BPF_F_SLEEPABLE = 1 << 4 + BPF_F_XDP_HAS_FRAGS = 1 << 5 + BPF_F_XDP_DEV_BOUND_ONLY = 1 << 6 +) + +const BPF_TAG_SIZE = 8 +const BPF_OBJ_NAME_LEN = 16 + +// wrappedErrno wraps [unix.Errno] to prevent direct comparisons with // syscall.E* or unix.E* constants. // // You should never export an error of this type. 
type wrappedErrno struct { - syscall.Errno + unix.Errno } func (we wrappedErrno) Unwrap() error { return we.Errno } +func (we wrappedErrno) Error() string { + if we.Errno == ENOTSUPP { + return "operation not supported" + } + return we.Errno.Error() +} + type syscallError struct { error - errno syscall.Errno + errno unix.Errno } -func Error(err error, errno syscall.Errno) error { +func Error(err error, errno unix.Errno) error { return &syscallError{err, errno} } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go new file mode 100644 index 000000000..b99e6e462 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall_other.go @@ -0,0 +1,84 @@ +//go:build !windows + +package sys + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // Prevent the Go profiler from repeatedly interrupting the verifier, + // which could otherwise lead to a livelock due to receiving EAGAIN. + if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { + maskProfilerSignal() + defer unmaskProfilerSignal() + } + + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// ObjGetTyped wraps [ObjGet] with a readlink call to extract the type of the +// underlying bpf object. +func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + typ, err := readType(fd) + if err != nil { + _ = fd.Close() + return nil, 0, fmt.Errorf("reading fd type: %w", err) + } + + return fd, typ, nil +} + +// readType returns the bpf object type of the file descriptor by calling +// readlink(3). Returns an error if the file descriptor does not represent a bpf +// object. +func readType(fd *FD) (ObjType, error) { + s, err := os.Readlink(filepath.Join("/proc/self/fd/", fd.String())) + if err != nil { + return 0, fmt.Errorf("readlink fd %d: %w", fd.Int(), err) + } + + s = strings.TrimPrefix(s, "anon_inode:") + + switch s { + case "bpf-map": + return BPF_TYPE_MAP, nil + case "bpf-prog": + return BPF_TYPE_PROG, nil + case "bpf-link": + return BPF_TYPE_LINK, nil + } + + return 0, fmt.Errorf("unknown type %s of fd %d", s, fd.Int()) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go new file mode 100644 index 000000000..08f73805c --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/syscall_windows.go @@ -0,0 +1,69 @@ +package sys + +import ( + "fmt" + "syscall" + "unsafe" + + "golang.org/x/sys/windows" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/unix" +) + +// BPF calls the BPF syscall wrapper in ebpfapi.dll. +// +// Any pointers contained in attr must use the Pointer type from this package. 
+// +// The implementation lives in https://github.com/microsoft/ebpf-for-windows/blob/main/libs/api/bpf_syscall.cpp +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // On Linux we need to guard against preemption by the profiler here. On + // Windows it seems like a cgocall may not be preempted: + // https://github.com/golang/go/blob/8b51146c698bcfcc2c2b73fa9390db5230f2ce0a/src/runtime/os_windows.go#L1240-L1246 + + addr, err := efw.BPF.Find() + if err != nil { + return 0, err + } + + // Using [LazyProc.Call] forces attr to escape, which isn't the case when using syscall.Syscall directly. + r1, _, lastError := syscall.SyscallN(addr, uintptr(cmd), uintptr(attr), size) + + if ret := int(efw.Int(r1)); ret < 0 { + errNo := unix.Errno(-ret) + if errNo == unix.EINVAL && lastError == windows.ERROR_CALL_NOT_IMPLEMENTED { + return 0, internal.ErrNotSupportedOnOS + } + return 0, wrappedErrno{errNo} + } + + return r1, nil +} + +// ObjGetTyped retrieves an pinned object and its type. +func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + efwType, err := efw.EbpfObjectGetInfoByFd(fd.Int(), nil, nil) + if err != nil { + _ = fd.Close() + return nil, 0, err + } + + switch efwType { + case efw.EBPF_OBJECT_UNKNOWN: + return fd, BPF_TYPE_UNSPEC, nil + case efw.EBPF_OBJECT_MAP: + return fd, BPF_TYPE_MAP, nil + case efw.EBPF_OBJECT_LINK: + return fd, BPF_TYPE_LINK, nil + case efw.EBPF_OBJECT_PROGRAM: + return fd, BPF_TYPE_PROG, nil + default: + return nil, 0, fmt.Errorf("unrecognized object type %v", efwType) + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/types.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/types.go index ab40cef6d..2e6674862 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/types.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -3,62 +3,251 @@ package sys import ( + "structs" "unsafe" ) -type AdjRoomMode int32 +const ( + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255 + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56 + BPF_ANY = 0 + BPF_CSUM_LEVEL_DEC = 2 + BPF_CSUM_LEVEL_INC = 1 + BPF_CSUM_LEVEL_QUERY = 0 + BPF_CSUM_LEVEL_RESET = 3 + BPF_EXIST = 2 + BPF_FIB_LKUP_RET_BLACKHOLE = 1 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8 + BPF_FIB_LKUP_RET_FWD_DISABLED = 5 + BPF_FIB_LKUP_RET_NOT_FWDED = 4 + BPF_FIB_LKUP_RET_NO_NEIGH = 7 + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9 + BPF_FIB_LKUP_RET_PROHIBIT = 3 + BPF_FIB_LKUP_RET_SUCCESS = 0 + BPF_FIB_LKUP_RET_UNREACHABLE = 2 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6 + BPF_FIB_LOOKUP_DIRECT = 1 + BPF_FIB_LOOKUP_MARK = 32 + BPF_FIB_LOOKUP_OUTPUT = 2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 4 + BPF_FIB_LOOKUP_SRC = 16 + BPF_FIB_LOOKUP_TBID = 8 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16 + BPF_F_ADJ_ROOM_FIXED_GSO = 1 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32 + BPF_F_BPRM_SECUREEXEC = 1 + BPF_F_BROADCAST = 8 + BPF_F_CLONE = 512 + BPF_F_CTXLEN_MASK = 4503595332403200 + BPF_F_CURRENT_CPU = 4294967295 + BPF_F_CURRENT_NETNS = 18446744073709551615 + BPF_F_DONT_FRAGMENT = 4 + BPF_F_EXCLUDE_INGRESS = 16 + BPF_F_FAST_STACK_CMP = 512 + BPF_F_GET_BRANCH_RECORDS_SIZE = 1 + BPF_F_HDR_FIELD_MASK = 15 + BPF_F_INDEX_MASK = 4294967295 + 
BPF_F_INGRESS = 1 + BPF_F_INNER_MAP = 4096 + BPF_F_INVALIDATE_HASH = 2 + BPF_F_KPROBE_MULTI_RETURN = 1 + BPF_F_LINK = 8192 + BPF_F_LOCK = 4 + BPF_F_MARK_ENFORCE = 64 + BPF_F_MARK_MANGLED_0 = 32 + BPF_F_MMAPABLE = 1024 + BPF_F_NEIGH = 2 + BPF_F_NEXTHOP = 8 + BPF_F_NO_COMMON_LRU = 2 + BPF_F_NO_PREALLOC = 1 + BPF_F_NO_TUNNEL_KEY = 16 + BPF_F_NO_USER_CONV = 262144 + BPF_F_NUMA_NODE = 4 + BPF_F_PATH_FD = 16384 + BPF_F_PEER = 4 + BPF_F_PRESERVE_ELEMS = 2048 + BPF_F_PSEUDO_HDR = 16 + BPF_F_RDONLY = 8 + BPF_F_RDONLY_PROG = 128 + BPF_F_RECOMPUTE_CSUM = 1 + BPF_F_REUSE_STACKID = 1024 + BPF_F_SEGV_ON_FAULT = 131072 + BPF_F_SEQ_NUMBER = 8 + BPF_F_SKIP_FIELD_MASK = 255 + BPF_F_STACK_BUILD_ID = 32 + BPF_F_SYSCTL_BASE_NAME = 1 + BPF_F_TIMER_ABS = 1 + BPF_F_TIMER_CPU_PIN = 2 + BPF_F_TOKEN_FD = 65536 + BPF_F_TUNINFO_FLAGS = 16 + BPF_F_TUNINFO_IPV6 = 1 + BPF_F_UPROBE_MULTI_RETURN = 1 + BPF_F_USER_BUILD_ID = 2048 + BPF_F_USER_STACK = 256 + BPF_F_VTYPE_BTF_OBJ_FD = 32768 + BPF_F_WRONLY = 16 + BPF_F_WRONLY_PROG = 256 + BPF_F_ZERO_CSUM_TX = 2 + BPF_F_ZERO_SEED = 64 + BPF_LOAD_HDR_OPT_TCP_SYN = 1 + BPF_LOCAL_STORAGE_GET_F_CREATE = 1 + BPF_MAX_LOOPS = 8388608 + BPF_MAX_TRAMP_LINKS = 38 + BPF_NOEXIST = 1 + BPF_RB_AVAIL_DATA = 0 + BPF_RB_CONS_POS = 2 + BPF_RB_FORCE_WAKEUP = 2 + BPF_RB_NO_WAKEUP = 1 + BPF_RB_PROD_POS = 3 + BPF_RB_RING_SIZE = 1 + BPF_REG_0 = 0 + BPF_REG_1 = 1 + BPF_REG_10 = 10 + BPF_REG_2 = 2 + BPF_REG_3 = 3 + BPF_REG_4 = 4 + BPF_REG_5 = 5 + BPF_REG_6 = 6 + BPF_REG_7 = 7 + BPF_REG_8 = 8 + BPF_REG_9 = 9 + BPF_RINGBUF_BUSY_BIT = 2147483648 + BPF_RINGBUF_DISCARD_BIT = 1073741824 + BPF_RINGBUF_HDR_SZ = 8 + BPF_SKB_CLOCK_MONOTONIC = 1 + BPF_SKB_CLOCK_REALTIME = 0 + BPF_SKB_CLOCK_TAI = 2 + BPF_SKB_TSTAMP_DELIVERY_MONO = 1 + BPF_SKB_TSTAMP_UNSPEC = 0 + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2 + BPF_SK_LOOKUP_F_REPLACE = 1 + BPF_SK_STORAGE_GET_F_CREATE = 1 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4 + BPF_SOCK_OPS_ALL_CB_FLAGS = 127 + BPF_SOCK_OPS_BASE_RTT = 7 + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14 + BPF_SOCK_OPS_NEEDS_ECN = 6 + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16 + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13 + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5 + BPF_SOCK_OPS_RETRANS_CB = 9 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2 + BPF_SOCK_OPS_RTO_CB = 8 + BPF_SOCK_OPS_RTO_CB_FLAG = 1 + BPF_SOCK_OPS_RTT_CB = 12 + BPF_SOCK_OPS_RTT_CB_FLAG = 8 + BPF_SOCK_OPS_RWND_INIT = 2 + BPF_SOCK_OPS_STATE_CB = 10 + BPF_SOCK_OPS_STATE_CB_FLAG = 4 + BPF_SOCK_OPS_TCP_CONNECT_CB = 3 + BPF_SOCK_OPS_TCP_LISTEN_CB = 11 + BPF_SOCK_OPS_TIMEOUT_INIT = 1 + BPF_SOCK_OPS_VOID = 0 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64 + BPF_TASK_ITER_ALL_PROCS = 0 + BPF_TASK_ITER_ALL_THREADS = 1 + BPF_TASK_ITER_PROC_THREADS = 2 + BPF_TCP_BOUND_INACTIVE = 13 + BPF_TCP_CLOSE = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_CLOSING = 11 + BPF_TCP_ESTABLISHED = 1 + BPF_TCP_FIN_WAIT1 = 4 + BPF_TCP_FIN_WAIT2 = 5 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_LISTEN = 10 + BPF_TCP_MAX_STATES = 14 + BPF_TCP_NEW_SYN_RECV = 12 + BPF_TCP_SYN_RECV = 3 + BPF_TCP_SYN_SENT = 2 + BPF_TCP_TIME_WAIT = 6 + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1 + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2 + BPF_XFRM_STATE_OPTS_SZ = 36 +) + +type AdjRoomMode uint32 const ( BPF_ADJ_ROOM_NET AdjRoomMode = 0 BPF_ADJ_ROOM_MAC AdjRoomMode = 1 ) -type AttachType int32 +type AttachType uint32 const ( - BPF_CGROUP_INET_INGRESS AttachType = 0 - BPF_CGROUP_INET_EGRESS AttachType = 1 - BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 - BPF_CGROUP_SOCK_OPS AttachType = 3 - 
BPF_SK_SKB_STREAM_PARSER AttachType = 4 - BPF_SK_SKB_STREAM_VERDICT AttachType = 5 - BPF_CGROUP_DEVICE AttachType = 6 - BPF_SK_MSG_VERDICT AttachType = 7 - BPF_CGROUP_INET4_BIND AttachType = 8 - BPF_CGROUP_INET6_BIND AttachType = 9 - BPF_CGROUP_INET4_CONNECT AttachType = 10 - BPF_CGROUP_INET6_CONNECT AttachType = 11 - BPF_CGROUP_INET4_POST_BIND AttachType = 12 - BPF_CGROUP_INET6_POST_BIND AttachType = 13 - BPF_CGROUP_UDP4_SENDMSG AttachType = 14 - BPF_CGROUP_UDP6_SENDMSG AttachType = 15 - BPF_LIRC_MODE2 AttachType = 16 - BPF_FLOW_DISSECTOR AttachType = 17 - BPF_CGROUP_SYSCTL AttachType = 18 - BPF_CGROUP_UDP4_RECVMSG AttachType = 19 - BPF_CGROUP_UDP6_RECVMSG AttachType = 20 - BPF_CGROUP_GETSOCKOPT AttachType = 21 - BPF_CGROUP_SETSOCKOPT AttachType = 22 - BPF_TRACE_RAW_TP AttachType = 23 - BPF_TRACE_FENTRY AttachType = 24 - BPF_TRACE_FEXIT AttachType = 25 - BPF_MODIFY_RETURN AttachType = 26 - BPF_LSM_MAC AttachType = 27 - BPF_TRACE_ITER AttachType = 28 - BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 - BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 - BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 - BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 - BPF_XDP_DEVMAP AttachType = 33 - BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 - BPF_XDP_CPUMAP AttachType = 35 - BPF_SK_LOOKUP AttachType = 36 - BPF_XDP AttachType = 37 - BPF_SK_SKB_VERDICT AttachType = 38 - __MAX_BPF_ATTACH_TYPE AttachType = 39 + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 + BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + BPF_LSM_CGROUP AttachType = 43 + BPF_STRUCT_OPS AttachType = 44 + BPF_NETFILTER AttachType = 45 + BPF_TCX_INGRESS AttachType = 46 + BPF_TCX_EGRESS AttachType = 47 + BPF_TRACE_UPROBE_MULTI AttachType = 48 + BPF_CGROUP_UNIX_CONNECT AttachType = 49 + BPF_CGROUP_UNIX_SENDMSG AttachType = 50 + BPF_CGROUP_UNIX_RECVMSG AttachType = 51 + BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52 + BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53 + BPF_NETKIT_PRIMARY AttachType = 
54 + BPF_NETKIT_PEER AttachType = 55 + BPF_TRACE_KPROBE_SESSION AttachType = 56 + __MAX_BPF_ATTACH_TYPE AttachType = 57 ) -type Cmd int32 +type Cmd uint32 const ( BPF_MAP_CREATE Cmd = 0 @@ -72,6 +261,7 @@ const ( BPF_PROG_ATTACH Cmd = 8 BPF_PROG_DETACH Cmd = 9 BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 BPF_PROG_GET_NEXT_ID Cmd = 11 BPF_MAP_GET_NEXT_ID Cmd = 12 BPF_PROG_GET_FD_BY_ID Cmd = 13 @@ -97,9 +287,11 @@ const ( BPF_ITER_CREATE Cmd = 33 BPF_LINK_DETACH Cmd = 34 BPF_PROG_BIND_MAP Cmd = 35 + BPF_TOKEN_CREATE Cmd = 36 + __MAX_BPF_CMD Cmd = 37 ) -type FunctionId int32 +type FunctionId uint32 const ( BPF_FUNC_unspec FunctionId = 0 @@ -268,17 +460,63 @@ const ( BPF_FUNC_check_mtu FunctionId = 163 BPF_FUNC_for_each_map_elem FunctionId = 164 BPF_FUNC_snprintf FunctionId = 165 - __BPF_FUNC_MAX_ID FunctionId = 166 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + BPF_FUNC_kptr_xchg FunctionId = 194 + BPF_FUNC_map_lookup_percpu_elem FunctionId = 195 + BPF_FUNC_skc_to_mptcp_sock FunctionId = 196 + BPF_FUNC_dynptr_from_mem FunctionId = 197 + BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198 + BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199 + BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200 + BPF_FUNC_dynptr_read FunctionId = 201 + BPF_FUNC_dynptr_write FunctionId = 202 + BPF_FUNC_dynptr_data FunctionId = 203 + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204 + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205 + BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206 + BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207 + BPF_FUNC_ktime_get_tai_ns FunctionId = 208 + BPF_FUNC_user_ringbuf_drain FunctionId = 209 + BPF_FUNC_cgrp_storage_get FunctionId = 210 + BPF_FUNC_cgrp_storage_delete FunctionId = 211 + __BPF_FUNC_MAX_ID FunctionId = 212 ) -type HdrStartOff int32 +type HdrStartOff uint32 const ( BPF_HDR_START_MAC HdrStartOff = 0 BPF_HDR_START_NET HdrStartOff = 1 ) -type LinkType int32 +type LinkType uint32 const ( BPF_LINK_TYPE_UNSPEC LinkType = 0 @@ -288,45 +526,81 @@ const ( BPF_LINK_TYPE_ITER LinkType = 4 BPF_LINK_TYPE_NETNS LinkType = 5 BPF_LINK_TYPE_XDP LinkType = 6 - MAX_BPF_LINK_TYPE LinkType = 7 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + BPF_LINK_TYPE_STRUCT_OPS LinkType = 9 + BPF_LINK_TYPE_NETFILTER LinkType = 10 + BPF_LINK_TYPE_TCX LinkType = 11 + BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 + BPF_LINK_TYPE_NETKIT LinkType = 13 
+ BPF_LINK_TYPE_SOCKMAP LinkType = 14 + __MAX_BPF_LINK_TYPE LinkType = 15 +) + +type MapType uint32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 + BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 + BPF_MAP_TYPE_ARENA MapType = 33 + __MAX_BPF_MAP_TYPE MapType = 34 ) -type MapType int32 +type ObjType uint32 const ( - BPF_MAP_TYPE_UNSPEC MapType = 0 - BPF_MAP_TYPE_HASH MapType = 1 - BPF_MAP_TYPE_ARRAY MapType = 2 - BPF_MAP_TYPE_PROG_ARRAY MapType = 3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 - BPF_MAP_TYPE_PERCPU_HASH MapType = 5 - BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 - BPF_MAP_TYPE_STACK_TRACE MapType = 7 - BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 - BPF_MAP_TYPE_LRU_HASH MapType = 9 - BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 - BPF_MAP_TYPE_LPM_TRIE MapType = 11 - BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 - BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 - BPF_MAP_TYPE_DEVMAP MapType = 14 - BPF_MAP_TYPE_SOCKMAP MapType = 15 - BPF_MAP_TYPE_CPUMAP MapType = 16 - BPF_MAP_TYPE_XSKMAP MapType = 17 - BPF_MAP_TYPE_SOCKHASH MapType = 18 - BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 - BPF_MAP_TYPE_QUEUE MapType = 22 - BPF_MAP_TYPE_STACK MapType = 23 - BPF_MAP_TYPE_SK_STORAGE MapType = 24 - BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 - BPF_MAP_TYPE_STRUCT_OPS MapType = 26 - BPF_MAP_TYPE_RINGBUF MapType = 27 - BPF_MAP_TYPE_INODE_STORAGE MapType = 28 - BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_TYPE_UNSPEC ObjType = 0 + BPF_TYPE_PROG ObjType = 1 + BPF_TYPE_MAP ObjType = 2 + BPF_TYPE_LINK ObjType = 3 ) -type ProgType int32 +type PerfEventType uint32 + +const ( + BPF_PERF_EVENT_UNSPEC PerfEventType = 0 + BPF_PERF_EVENT_UPROBE PerfEventType = 1 + BPF_PERF_EVENT_URETPROBE PerfEventType = 2 + BPF_PERF_EVENT_KPROBE PerfEventType = 3 + BPF_PERF_EVENT_KRETPROBE PerfEventType = 4 + BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5 + BPF_PERF_EVENT_EVENT PerfEventType = 6 +) + +type ProgType uint32 const ( BPF_PROG_TYPE_UNSPEC ProgType = 0 @@ -360,25 +634,29 @@ const ( BPF_PROG_TYPE_EXT ProgType = 28 BPF_PROG_TYPE_LSM ProgType = 29 BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 + BPF_PROG_TYPE_NETFILTER ProgType = 32 + 
__MAX_BPF_PROG_TYPE ProgType = 33 ) -type RetCode int32 +type RetCode uint32 const ( - BPF_OK RetCode = 0 - BPF_DROP RetCode = 2 - BPF_REDIRECT RetCode = 7 - BPF_LWT_REROUTE RetCode = 128 + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 + BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129 ) -type SkAction int32 +type SkAction uint32 const ( SK_DROP SkAction = 0 SK_PASS SkAction = 1 ) -type StackBuildIdStatus int32 +type StackBuildIdStatus uint32 const ( BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 @@ -386,13 +664,22 @@ const ( BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 ) -type StatsType int32 +type StatsType uint32 const ( BPF_STATS_RUN_TIME StatsType = 0 ) -type XdpAction int32 +type TcxActionBase int32 + +const ( + TCX_NEXT TcxActionBase = -1 + TCX_PASS TcxActionBase = 0 + TCX_DROP TcxActionBase = 2 + TCX_REDIRECT TcxActionBase = 7 +) + +type XdpAction uint32 const ( XDP_ABORTED XdpAction = 0 @@ -403,20 +690,23 @@ const ( ) type BtfInfo struct { - Btf Pointer + _ structs.HostLayout + Btf TypedPointer[uint8] BtfSize uint32 - Id uint32 - Name Pointer + Id BTFID + Name TypedPointer[uint8] NameLen uint32 KernelBtf uint32 } type FuncInfo struct { + _ structs.HostLayout InsnOff uint32 TypeId uint32 } type LineInfo struct { + _ structs.HostLayout InsnOff uint32 FileNameOff uint32 LineOff uint32 @@ -424,43 +714,47 @@ type LineInfo struct { } type LinkInfo struct { + _ structs.HostLayout Type LinkType Id LinkID ProgId uint32 _ [4]byte - Extra [16]uint8 + Extra [48]uint8 } type MapInfo struct { + _ structs.HostLayout Type uint32 - Id uint32 + Id MapID KeySize uint32 ValueSize uint32 MaxEntries uint32 MapFlags uint32 Name ObjName Ifindex uint32 - BtfVmlinuxValueTypeId uint32 + BtfVmlinuxValueTypeId TypeID NetnsDev uint64 NetnsIno uint64 BtfId uint32 - BtfKeyTypeId uint32 - BtfValueTypeId uint32 - _ [4]byte + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxId uint32 + MapExtra uint64 } type ProgInfo struct { + _ structs.HostLayout Type uint32 Id uint32 Tag [8]uint8 JitedProgLen uint32 XlatedProgLen uint32 - JitedProgInsns uint64 - XlatedProgInsns Pointer + JitedProgInsns TypedPointer[uint8] + XlatedProgInsns TypedPointer[uint8] LoadTime uint64 CreatedByUid uint32 NrMapIds uint32 - MapIds Pointer + MapIds TypedPointer[MapID] Name ObjName Ifindex uint32 _ [4]byte /* unsupported bitfield */ @@ -468,15 +762,15 @@ type ProgInfo struct { NetnsIno uint64 NrJitedKsyms uint32 NrJitedFuncLens uint32 - JitedKsyms uint64 - JitedFuncLens uint64 - BtfId uint32 + JitedKsyms TypedPointer[uint64] + JitedFuncLens TypedPointer[uint32] + BtfId BTFID FuncInfoRecSize uint32 - FuncInfo uint64 + FuncInfo TypedPointer[uint8] NrFuncInfo uint32 NrLineInfo uint32 - LineInfo uint64 - JitedLineInfo uint64 + LineInfo TypedPointer[uint8] + JitedLineInfo TypedPointer[uint64] NrJitedLineInfo uint32 LineInfoRecSize uint32 JitedLineInfoRecSize uint32 @@ -485,10 +779,43 @@ type ProgInfo struct { RunTimeNs uint64 RunCnt uint64 RecursionMisses uint64 + VerifiedInsns uint32 + AttachBtfObjId BTFID + AttachBtfId TypeID + _ [4]byte +} + +type SkLookup struct { + _ structs.HostLayout + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 [16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + _ structs.HostLayout + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type 
BtfGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 } -type BtfGetFdByIdAttr struct{ Id uint32 } - func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) if err != nil { @@ -497,13 +824,27 @@ func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { return NewFD(int(fd)) } +type BtfGetNextIdAttr struct { + _ structs.HostLayout + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + type BtfLoadAttr struct { - Btf Pointer - BtfLogBuf Pointer - BtfSize uint32 - BtfLogSize uint32 - BtfLogLevel uint32 - _ [4]byte + _ structs.HostLayout + Btf TypedPointer[uint8] + BtfLogBuf TypedPointer[uint8] + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + BtfLogTrueSize uint32 + BtfFlags uint32 + BtfTokenFd int32 } func BtfLoad(attr *BtfLoadAttr) (*FD, error) { @@ -514,7 +855,10 @@ func BtfLoad(attr *BtfLoadAttr) (*FD, error) { return NewFD(int(fd)) } -type EnableStatsAttr struct{ Type uint32 } +type EnableStatsAttr struct { + _ structs.HostLayout + Type uint32 +} func EnableStats(attr *EnableStatsAttr) (*FD, error) { fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -525,6 +869,7 @@ func EnableStats(attr *EnableStatsAttr) (*FD, error) { } type IterCreateAttr struct { + _ structs.HostLayout LinkFd uint32 Flags uint32 } @@ -538,12 +883,13 @@ func IterCreate(attr *IterCreateAttr) (*FD, error) { } type LinkCreateAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType Flags uint32 - TargetBtfId uint32 - _ [12]byte + TargetBtfId TypeID + _ [44]byte } func LinkCreate(attr *LinkCreateAttr) (*FD, error) { @@ -555,13 +901,14 @@ func LinkCreate(attr *LinkCreateAttr) (*FD, error) { } type LinkCreateIterAttr struct { + _ structs.HostLayout ProgFd uint32 TargetFd uint32 AttachType AttachType Flags uint32 IterInfo Pointer IterInfoLen uint32 - _ [4]byte + _ [36]byte } func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { @@ -572,7 +919,187 @@ func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { return NewFD(int(fd)) } +type LinkCreateKprobeMultiAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + KprobeMultiFlags uint32 + Count uint32 + Syms StringSlicePointer + Addrs TypedPointer[uintptr] + Cookies TypedPointer[uint64] + _ [16]byte +} + +func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetfilterAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Pf uint32 + Hooknum uint32 + Priority int32 + NetfilterFlags uint32 + _ [32]byte +} + +func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetkitAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + 
return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [40]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTcxAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTracingAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId BTFID + _ [4]byte + Cookie uint64 + _ [32]byte +} + +func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateUprobeMultiAttr struct { + _ structs.HostLayout + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Path StringPointer + Offsets TypedPointer[uint64] + RefCtrOffsets TypedPointer[uint64] + Cookies TypedPointer[uint64] + Count uint32 + UprobeMultiFlags uint32 + Pid uint32 + _ [4]byte +} + +func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkDetachAttr struct { + _ structs.HostLayout + LinkFd uint32 +} + +func LinkDetach(attr *LinkDetachAttr) error { + _, err := BPF(BPF_LINK_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type LinkGetFdByIdAttr struct { + _ structs.HostLayout + Id LinkID +} + +func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetNextIdAttr struct { + _ structs.HostLayout + Id LinkID + NextId LinkID +} + +func LinkGetNextId(attr *LinkGetNextIdAttr) error { + _, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + type LinkUpdateAttr struct { + _ structs.HostLayout LinkFd uint32 NewProgFd uint32 Flags uint32 @@ -585,6 +1112,7 @@ func LinkUpdate(attr *LinkUpdateAttr) error { } type MapCreateAttr struct { + _ structs.HostLayout MapType MapType KeySize uint32 ValueSize uint32 @@ -595,9 +1123,12 @@ type MapCreateAttr struct { MapName ObjName MapIfindex uint32 BtfFd uint32 - BtfKeyTypeId uint32 - BtfValueTypeId uint32 - BtfVmlinuxValueTypeId uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxValueTypeId TypeID + MapExtra uint64 + ValueTypeBtfObjFd int32 + MapTokenFd int32 } func MapCreate(attr *MapCreateAttr) (*FD, error) { @@ -609,6 +1140,7 @@ func MapCreate(attr *MapCreateAttr) (*FD, error) { } type MapDeleteBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -625,6 +1157,7 @@ func MapDeleteBatch(attr *MapDeleteBatchAttr) error { } type MapDeleteElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -637,14 
+1170,20 @@ func MapDeleteElem(attr *MapDeleteElemAttr) error { return err } -type MapFreezeAttr struct{ MapFd uint32 } +type MapFreezeAttr struct { + _ structs.HostLayout + MapFd uint32 +} func MapFreeze(attr *MapFreezeAttr) error { _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) return err } -type MapGetFdByIdAttr struct{ Id uint32 } +type MapGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -655,6 +1194,7 @@ func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { } type MapGetNextIdAttr struct { + _ structs.HostLayout Id uint32 NextId uint32 } @@ -665,6 +1205,7 @@ func MapGetNextId(attr *MapGetNextIdAttr) error { } type MapGetNextKeyAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -677,6 +1218,7 @@ func MapGetNextKey(attr *MapGetNextKeyAttr) error { } type MapLookupAndDeleteBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -693,6 +1235,7 @@ func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { } type MapLookupAndDeleteElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -706,6 +1249,7 @@ func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { } type MapLookupBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -722,6 +1266,7 @@ func MapLookupBatch(attr *MapLookupBatchAttr) error { } type MapLookupElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -735,6 +1280,7 @@ func MapLookupElem(attr *MapLookupElemAttr) error { } type MapUpdateBatchAttr struct { + _ structs.HostLayout InBatch Pointer OutBatch Pointer Keys Pointer @@ -751,6 +1297,7 @@ func MapUpdateBatch(attr *MapUpdateBatchAttr) error { } type MapUpdateElemAttr struct { + _ structs.HostLayout MapFd uint32 _ [4]byte Key Pointer @@ -764,9 +1311,12 @@ func MapUpdateElem(attr *MapUpdateElemAttr) error { } type ObjGetAttr struct { - Pathname Pointer + _ structs.HostLayout + Pathname StringPointer BpfFd uint32 FileFlags uint32 + PathFd int32 + _ [4]byte } func ObjGet(attr *ObjGetAttr) (*FD, error) { @@ -778,6 +1328,7 @@ func ObjGet(attr *ObjGetAttr) (*FD, error) { } type ObjGetInfoByFdAttr struct { + _ structs.HostLayout BpfFd uint32 InfoLen uint32 Info Pointer @@ -789,9 +1340,12 @@ func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { } type ObjPinAttr struct { - Pathname Pointer + _ structs.HostLayout + Pathname StringPointer BpfFd uint32 FileFlags uint32 + PathFd int32 + _ [4]byte } func ObjPin(attr *ObjPinAttr) error { @@ -800,11 +1354,14 @@ func ObjPin(attr *ObjPinAttr) error { } type ProgAttachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 - AttachFlags uint32 - ReplaceBpfFd uint32 + _ structs.HostLayout + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 + RelativeFdOrId uint32 + ExpectedRevision uint64 } func ProgAttach(attr *ProgAttachAttr) error { @@ -813,6 +1370,7 @@ func ProgAttach(attr *ProgAttachAttr) error { } type ProgBindMapAttr struct { + _ structs.HostLayout ProgFd uint32 MapFd uint32 Flags uint32 @@ -824,9 +1382,14 @@ func ProgBindMap(attr *ProgBindMapAttr) error { } type ProgDetachAttr struct { - TargetFd uint32 - AttachBpfFd uint32 - AttachType uint32 + _ structs.HostLayout + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + _ 
[4]byte + RelativeFdOrId uint32 + ExpectedRevision uint64 } func ProgDetach(attr *ProgDetachAttr) error { @@ -834,7 +1397,10 @@ func ProgDetach(attr *ProgDetachAttr) error { return err } -type ProgGetFdByIdAttr struct{ Id uint32 } +type ProgGetFdByIdAttr struct { + _ structs.HostLayout + Id uint32 +} func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) @@ -845,6 +1411,7 @@ func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { } type ProgGetNextIdAttr struct { + _ structs.HostLayout Id uint32 NextId uint32 } @@ -855,13 +1422,14 @@ func ProgGetNextId(attr *ProgGetNextIdAttr) error { } type ProgLoadAttr struct { + _ structs.HostLayout ProgType ProgType InsnCnt uint32 - Insns Pointer - License Pointer - LogLevel uint32 + Insns TypedPointer[uint8] + License StringPointer + LogLevel LogLevel LogSize uint32 - LogBuf Pointer + LogBuf TypedPointer[uint8] KernVersion uint32 ProgFlags uint32 ProgName ObjName @@ -869,13 +1437,19 @@ type ProgLoadAttr struct { ExpectedAttachType AttachType ProgBtfFd uint32 FuncInfoRecSize uint32 - FuncInfo Pointer + FuncInfo TypedPointer[uint8] FuncInfoCnt uint32 LineInfoRecSize uint32 - LineInfo Pointer + LineInfo TypedPointer[uint8] LineInfoCnt uint32 - AttachBtfId uint32 - AttachProgFd uint32 + AttachBtfId TypeID + AttachBtfObjFd uint32 + CoreReloCnt uint32 + FdArray TypedPointer[int32] + CoreRelos TypedPointer[uint8] + CoreReloRecSize uint32 + LogTrueSize uint32 + ProgTokenFd int32 _ [4]byte } @@ -887,21 +1461,44 @@ func ProgLoad(attr *ProgLoadAttr) (*FD, error) { return NewFD(int(fd)) } +type ProgQueryAttr struct { + _ structs.HostLayout + TargetFdOrIfindex uint32 + AttachType AttachType + QueryFlags uint32 + AttachFlags uint32 + ProgIds TypedPointer[ProgramID] + Count uint32 + _ [4]byte + ProgAttachFlags TypedPointer[ProgramID] + LinkIds TypedPointer[LinkID] + LinkAttachFlags TypedPointer[LinkID] + Revision uint64 +} + +func ProgQuery(attr *ProgQueryAttr) error { + _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + type ProgRunAttr struct { + _ structs.HostLayout ProgFd uint32 Retval uint32 DataSizeIn uint32 DataSizeOut uint32 - DataIn Pointer - DataOut Pointer + DataIn TypedPointer[uint8] + DataOut TypedPointer[uint8] Repeat uint32 Duration uint32 CtxSizeIn uint32 CtxSizeOut uint32 - CtxIn Pointer - CtxOut Pointer + CtxIn TypedPointer[uint8] + CtxOut TypedPointer[uint8] Flags uint32 Cpu uint32 + BatchSize uint32 + _ [4]byte } func ProgRun(attr *ProgRunAttr) error { @@ -910,9 +1507,11 @@ func ProgRun(attr *ProgRunAttr) error { } type RawTracepointOpenAttr struct { - Name Pointer + _ structs.HostLayout + Name StringPointer ProgFd uint32 _ [4]byte + Cookie uint64 } func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { @@ -924,31 +1523,140 @@ func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { } type CgroupLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte CgroupId uint64 AttachType AttachType - _ [4]byte + _ [36]byte } type IterLinkInfo struct { - TargetName Pointer + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TargetName TypedPointer[uint8] TargetNameLen uint32 } +type KprobeLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + FuncName TypedPointer[uint8] + NameLen uint32 + Offset uint32 + Addr uint64 + Missed uint64 + Cookie uint64 +} + 
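// Editorial note (not part of the vendored patch): every *LinkInfo variant in
// this block mirrors one arm of the kernel's union bpf_link_info. Each starts
// with the same 16-byte header (Type, Id, ProgId plus 4 bytes of padding), and
// the trailing padding arrays size each variant to match the generic LinkInfo
// above. For example, with these declarations:
//
//	unsafe.Sizeof(LinkInfo{})       // 64: 16-byte header + Extra [48]uint8
//	unsafe.Sizeof(KprobeLinkInfo{}) // 64: header + 48 bytes of kprobe fields
//
// so any variant can be handed to BPF_OBJ_GET_INFO_BY_FD where a LinkInfo is
// expected, selected by the Type field the kernel fills in.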
+type KprobeMultiLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Addrs TypedPointer[uint64] + Count uint32 + Flags uint32 + Missed uint64 + Cookies TypedPointer[uint64] + _ [16]byte +} + type NetNsLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte NetnsIno uint32 AttachType AttachType + _ [40]byte +} + +type NetfilterLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 + _ [32]byte +} + +type NetkitLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type PerfEventLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType } type RawTracepointLinkInfo struct { - TpName Pointer - TpNameLen uint32 + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 _ [4]byte + TpName TypedPointer[uint8] + TpNameLen uint32 + _ [36]byte +} + +type TcxLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte } type TracingLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte AttachType AttachType TargetObjId uint32 - TargetBtfId uint32 + TargetBtfId TypeID + _ [36]byte } -type XDPLinkInfo struct{ Ifindex uint32 } +type XDPLinkInfo struct { + _ structs.HostLayout + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + _ [44]byte +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go new file mode 100644 index 000000000..62e483a1c --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -0,0 +1,85 @@ +package sysenc + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/sys" +) + +type Buffer struct { + ptr unsafe.Pointer + // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using + // zero-copy unmarshaling. + size int +} + +const syscallPointerOnly = -1 + +func newBuffer(buf []byte) Buffer { + if len(buf) == 0 { + return Buffer{} + } + return Buffer{unsafe.Pointer(&buf[0]), len(buf)} +} + +// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling. +// +// [Pointer] is the only valid method to call on such a Buffer. +// Use [SyscallBuffer] instead if possible. +func UnsafeBuffer(ptr unsafe.Pointer) Buffer { + return Buffer{ptr, syscallPointerOnly} +} + +// SyscallOutput prepares a Buffer for a syscall to write into. +// +// size is the length of the desired buffer in bytes. +// The buffer may point at the underlying memory of dst, in which case [Unmarshal] +// becomes a no-op. +// +// The contents of the buffer are undefined and may be non-zero. +func SyscallOutput(dst any, size int) Buffer { + if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size { + buf := newBuffer(dstBuf) + buf.size = syscallPointerOnly + return buf + } + + return newBuffer(make([]byte, size)) +} + +// CopyTo copies the buffer into dst. +// +// Returns the number of copied bytes. +func (b Buffer) CopyTo(dst []byte) int { + return copy(dst, b.Bytes()) +} + +// AppendTo appends the buffer onto dst. +func (b Buffer) AppendTo(dst []byte) []byte { + return append(dst, b.Bytes()...) +} + +// Pointer returns the location where a syscall should write. 
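// An editorial usage sketch (not upstream code) of the typical round trip, as
// seen from a caller that imports internal/sys, internal/sysenc and "unsafe";
// rawMapFD is a hypothetical raw map file descriptor:
//
//	var info sys.MapInfo
//	buf := sysenc.SyscallOutput(&info, int(unsafe.Sizeof(info)))
//	err := sys.ObjGetInfoByFd(&sys.ObjGetInfoByFdAttr{
//		BpfFd:   rawMapFD,
//		InfoLen: uint32(unsafe.Sizeof(info)),
//		Info:    buf.Pointer(), // kernel output lands here
//	})
//	if err == nil {
//		err = buf.Unmarshal(&info) // a no-op when the zero-copy path was taken
//	}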
+func (b Buffer) Pointer() sys.Pointer { + // NB: This deliberately ignores b.length to support zero-copy + // marshaling / unmarshaling using unsafe.Pointer. + return sys.UnsafePointer(b.ptr) +} + +// Unmarshal the buffer into the provided value. +func (b Buffer) Unmarshal(data any) error { + if b.size == syscallPointerOnly { + return nil + } + + return Unmarshal(data, b.Bytes()) +} + +// Bytes returns the buffer as a byte slice. Returns nil if the Buffer was +// created using UnsafeBuffer or by zero-copy unmarshaling. +func (b Buffer) Bytes() []byte { + if b.size == syscallPointerOnly { + return nil + } + return unsafe.Slice((*byte)(b.ptr), b.size) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go new file mode 100644 index 000000000..676ad98ba --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go @@ -0,0 +1,3 @@ +// Package sysenc provides efficient conversion of Go values to system +// call interfaces. +package sysenc diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go new file mode 100644 index 000000000..52d111e7a --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go @@ -0,0 +1,41 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://go.dev/LICENSE. + +package sysenc + +import ( + "reflect" + "sync" +) + +var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool + +func hasUnexportedFields(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Slice, reflect.Array, reflect.Pointer: + return hasUnexportedFields(typ.Elem()) + + case reflect.Struct: + if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok { + return unexported.(bool) + } + + unexported := false + for i, n := 0, typ.NumField(); i < n; i++ { + field := typ.Field(i) + // Package binary allows _ fields but always writes zeroes into them. + if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) { + unexported = true + break + } + } + + hasUnexportedFieldsCache.Store(typ, unexported) + return unexported + + default: + // NB: It's not clear what this means for Chan and so on. + return false + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go new file mode 100644 index 000000000..3f7deb80f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -0,0 +1,161 @@ +package sysenc + +import ( + "encoding" + "encoding/binary" + "errors" + "fmt" + "reflect" + "slices" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +// Marshal turns data into a byte slice using the system's native endianness. +// +// If possible, avoids allocations by directly using the backing memory +// of data. This means that the variable must not be modified for the lifetime +// of the returned [Buffer]. +// +// Returns an error if the data can't be turned into a byte slice according to +// the behaviour of [binary.Write]. 
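// An illustrative example (editorial, not upstream): a pointer to a struct
// with no padding and only exported fields takes the zero-copy path, so the
// returned Buffer aliases the value's memory and the value must not change
// while the Buffer is in use; the fixed-size integer cases always allocate:
//
//	type pair struct {
//		A uint32
//		B uint32
//	}
//	v := pair{A: 1, B: 2}
//	buf, _ := Marshal(&v, int(unsafe.Sizeof(v))) // 8 bytes, backed by v itself
//	alloc, _ := Marshal(uint32(7), 4)            // freshly allocated 4 bytes
//	_, _ = buf, alloc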
+func Marshal(data any, size int) (Buffer, error) { + if data == nil { + return Buffer{}, errors.New("can't marshal a nil value") + } + + var buf []byte + var err error + switch value := data.(type) { + case encoding.BinaryMarshaler: + buf, err = value.MarshalBinary() + case string: + buf = unsafe.Slice(unsafe.StringData(value), len(value)) + case []byte: + buf = value + case int16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value)) + case uint16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value) + case int32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value)) + case uint32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value) + case int64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value)) + case uint64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value) + default: + if buf := unsafeBackingMemory(data); len(buf) == size { + return newBuffer(buf), nil + } + + buf, err = binary.Append(nil, internal.NativeEndian, value) + } + if err != nil { + return Buffer{}, err + } + + if len(buf) != size { + return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size) + } + + return newBuffer(buf), nil +} + +// Unmarshal a byte slice in the system's native endianness into data. +// +// Returns an error if buf can't be unmarshalled according to the behaviour +// of [binary.Decode]. +func Unmarshal(data interface{}, buf []byte) error { + switch value := data.(type) { + case encoding.BinaryUnmarshaler: + return value.UnmarshalBinary(buf) + + case *string: + *value = string(buf) + return nil + + case *[]byte: + // Backwards compat: unmarshaling into a slice replaces the whole slice. + *value = slices.Clone(buf) + return nil + + default: + if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) { + copy(dataBuf, buf) + return nil + } + + n, err := binary.Decode(buf, internal.NativeEndian, value) + if err != nil { + return err + } + + if n != len(buf) { + return fmt.Errorf("unmarshaling %T doesn't consume all data", data) + } + + return nil + } +} + +// unsafeBackingMemory returns the backing memory of data if it can be used +// instead of calling into package binary. +// +// Returns nil if the value is not a pointer or a slice, or if it contains +// padding or unexported fields. +func unsafeBackingMemory(data any) []byte { + if data == nil { + return nil + } + + value := reflect.ValueOf(data) + var valueSize int + switch value.Kind() { + case reflect.Pointer: + if value.IsNil() { + return nil + } + + if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice { + valueSize = int(elemType.Size()) + break + } + + // We're dealing with a pointer to a slice. Dereference and + // handle it like a regular slice. + value = value.Elem() + fallthrough + + case reflect.Slice: + valueSize = int(value.Type().Elem().Size()) * value.Len() + + default: + // Prevent Value.UnsafePointer from panicking. + return nil + } + + // Some nil pointer types currently crash binary.Size. Call it after our own + // code so that the panic isn't reachable. + // See https://github.com/golang/go/issues/60892 + if size := binary.Size(data); size == -1 || size != valueSize { + // The type contains padding or unsupported types. + return nil + } + + if hasUnexportedFields(reflect.TypeOf(data)) { + return nil + } + + // Reinterpret the pointer as a byte slice. 
This violates the unsafe.Pointer + // rules because it's very unlikely that the source data has "an equivalent + // memory layout". However, we can make it safe-ish because of the + // following reasons: + // - There is no alignment mismatch since we cast to a type with an + // alignment of 1. + // - There are no pointers in the source type so we don't upset the GC. + // - The length is verified at runtime. + return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go new file mode 100644 index 000000000..c47acf89c --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/fd_trace.go @@ -0,0 +1,103 @@ +package testmain + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sync" + "sync/atomic" +) + +// foundLeak is atomic since the GC may collect objects in parallel. +var foundLeak atomic.Bool + +func onLeakFD(fs *runtime.Frames) { + foundLeak.Store(true) + fmt.Fprintln(os.Stderr, "leaked fd created at:") + fmt.Fprintln(os.Stderr, formatFrames(fs)) +} + +// fds is a registry of all file descriptors wrapped into sys.fds that were +// created while an fd tracer was active. +var fds *sync.Map // map[int]*runtime.Frames + +// TraceFD associates raw with the current execution stack. +// +// skip controls how many entries of the stack the function should skip. +func TraceFD(raw int, skip int) { + if fds == nil { + return + } + + // Attempt to store the caller's stack for the given fd value. + // Panic if fds contains an existing stack for the fd. + old, exist := fds.LoadOrStore(raw, callersFrames(skip)) + if exist { + f := old.(*runtime.Frames) + panic(fmt.Sprintf("found existing stack for fd %d:\n%s", raw, formatFrames(f))) + } +} + +// ForgetFD removes any existing association for raw. +func ForgetFD(raw int) { + if fds != nil { + fds.Delete(raw) + } +} + +// LeakFD indicates that raw was leaked. +// +// Calling the function with a value that was not passed to [TraceFD] before +// is undefined. +func LeakFD(raw int) { + if fds == nil { + return + } + + // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback + // is invoked at most once for one sys.FD allocation, runtime.Frames can only + // be unwound once. + f, ok := fds.LoadAndDelete(raw) + if ok { + onLeakFD(f.(*runtime.Frames)) + } +} + +// flushFrames removes all elements from fds and returns them as a slice. This +// deals with the fact that a runtime.Frames can only be unwound once using +// Next(). +func flushFrames() []*runtime.Frames { + var frames []*runtime.Frames + fds.Range(func(key, value any) bool { + frames = append(frames, value.(*runtime.Frames)) + fds.Delete(key) + return true + }) + return frames +} + +func callersFrames(skip int) *runtime.Frames { + c := make([]uintptr, 32) + + // Skip runtime.Callers and this function. + i := runtime.Callers(skip+2, c) + if i == 0 { + return nil + } + + return runtime.CallersFrames(c) +} + +// formatFrames formats a runtime.Frames as a human-readable string. 
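// The per-frame format written below is "\tFUNC+OFFSET\n\t\tFILE:LINE\n", so a
// leak report printed by onLeakFD looks roughly like this (editorial example,
// values illustrative only):
//
//	leaked fd created at:
//		github.com/cilium/ebpf/internal/sys.NewFD+0x2a
//			/home/user/ebpf/internal/sys/fd.go:47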
+func formatFrames(fs *runtime.Frames) string { + var b bytes.Buffer + for { + f, more := fs.Next() + b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) + if !more { + break + } + } + return b.String() +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go new file mode 100644 index 000000000..53de97c86 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/main.go @@ -0,0 +1,58 @@ +package testmain + +import ( + "flag" + "fmt" + "os" + "sync" + + "github.com/cilium/ebpf/internal/platform" +) + +type testingM interface { + Run() int +} + +// Run m with various debug aids enabled. +// +// The function calls [os.Exit] and does not return. +func Run(m testingM) { + const traceLogFlag = "trace-log" + + var ts *traceSession + if platform.IsWindows { + traceLog := flag.Bool(traceLogFlag, false, "Output a trace of eBPF runtime activity") + flag.Parse() + + if *traceLog { + var err error + ts, err = newTraceSession() + if err != nil { + fmt.Fprintln(os.Stderr, "Disabling trace logging:", err) + } + } + } + defer ts.Close() + + fds = new(sync.Map) + ret := m.Run() + + for _, f := range flushFrames() { + onLeakFD(f) + } + + if foundLeak.Load() { + ret = 99 + } + + if err := ts.Dump(os.Stderr); err != nil { + fmt.Fprintln(os.Stderr, "Error while dumping trace log:", err) + ret = 99 + } + + if platform.IsWindows && ret != 0 && ts == nil { + fmt.Fprintf(os.Stderr, "Consider enabling trace logging with -%s\n", traceLogFlag) + } + + os.Exit(ret) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go new file mode 100644 index 000000000..533af9dbb --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/testutils/testmain/windows.go @@ -0,0 +1,219 @@ +package testmain + +import ( + "encoding/xml" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "slices" + "strconv" + "strings" + "text/tabwriter" +) + +type tracelogKeywords uint64 + +// Know tracelog keywords. +// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/libs/shared/ebpf_tracelog.h +var allKeywords = []string{ + "entry-exit", + "base", + "error", + "epoch", + "core", + "link", + "map", + "program", + "api", + "printk", + "native", +} + +func (kw *tracelogKeywords) UnmarshalText(text []byte) error { + decoded, err := strconv.ParseUint(string(text), 0, 64) + if err != nil { + return fmt.Errorf("foo: %w", err) + } + *kw = tracelogKeywords(decoded) + return nil +} + +func (kw tracelogKeywords) decode() []string { + var keywords []string + for _, keyword := range allKeywords { + if kw&1 > 0 { + keywords = append(keywords, keyword) + } + kw >>= 1 + } + if kw > 0 { + keywords = append(keywords, fmt.Sprintf("0x%x", kw)) + } + return keywords +} + +type traceSession struct { + session string +} + +// newTraceSession starts a trace log for eBPF for Windows related events. 
+// +// * https://github.com/microsoft/ebpf-for-windows/blob/main/docs/GettingStarted.md#using-tracing +// * https://devblogs.microsoft.com/performance-diagnostics/controlling-the-event-session-name-with-the-instance-name/ and +func newTraceSession() (*traceSession, error) { + def := filepath.Join(os.Getenv("ProgramFiles"), "ebpf-for-windows\\ebpfforwindows.wprp") + if _, err := os.Stat(def); err != nil { + return nil, err + } + + session := fmt.Sprintf("epbf-go-%d", os.Getpid()) + wpr := exec.Command("wpr.exe", "-start", def, "-filemode", "-instancename", session) + wpr.Stderr = os.Stderr + if err := wpr.Run(); err != nil { + return nil, err + } + + return &traceSession{session}, nil +} + +func (ts *traceSession) Close() error { + if ts == nil { + return nil + } + + return ts.stop(os.DevNull) +} + +func (ts *traceSession) stop(file string) error { + if ts.session == "" { + return nil + } + + wpr := exec.Command("wpr.exe", "-stop", file, "-instancename", ts.session) + if err := wpr.Run(); err != nil { + return err + } + + ts.session = "" + return nil +} + +func (ts *traceSession) Dump(w io.Writer) error { + if ts == nil { + return nil + } + + path, err := os.MkdirTemp("", "ebpf-go-trace") + if err != nil { + return err + } + defer os.RemoveAll(path) + + trace := filepath.Join(path, "trace.etl") + if err := ts.stop(trace); err != nil { + return fmt.Errorf("write trace: %w", err) + } + + netsh := exec.Command("netsh.exe", "trace", "convert", trace, "dump=XML") + if err := netsh.Run(); err != nil { + return err + } + + f, err := os.Open(filepath.Join(path, "trace.xml")) + if err != nil { + return err + } + defer f.Close() + + return summariseWPRTrace(f, w) +} + +func summariseWPRTrace(r io.Reader, w io.Writer) error { + type nameValue struct { + Name string `xml:"Name,attr"` + Value string `xml:",chardata"` + } + + type event struct { + XMLName xml.Name `xml:"Event"` + System struct { + Provider struct { + Name string `xml:"Name,attr"` + } `xml:"Provider"` + TimeCreated struct { + SystemTime string `xml:"SystemTime,attr"` + } `xml:"TimeCreated"` + Keywords tracelogKeywords `xml:"Keywords"` + Level uint64 `xml:"Level"` + } `xml:"System"` + EventData struct { + Data []nameValue `xml:"Data"` + } `xml:"EventData"` + RenderingInfo struct { + Task string `xml:"Task"` + } `xml:"RenderingInfo"` + } + + var events struct { + Events []event `xml:"Event"` + } + + err := xml.NewDecoder(r).Decode(&events) + if err != nil { + return fmt.Errorf("unmarshal trace XML: %w", err) + } + + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + for _, event := range events.Events { + if !strings.Contains(event.System.Provider.Name, "Ebpf") { + continue + } + + flag := " " + // See https://learn.microsoft.com/en-us/windows/win32/api/traceloggingprovider/nf-traceloggingprovider-tracelogginglevel#remarks + if event.System.Level > 0 && event.System.Level <= 3 { + flag = "!" + } + + kw := event.System.Keywords.decode() + fmt.Fprintf(tw, "%s\t%s\t", flag, strings.Join(kw, ",")) + + data := event.EventData.Data + slices.SortFunc(data, func(a, b nameValue) int { + return strings.Compare(a.Name, b.Name) + }) + + var first string + for _, name := range []string{ + "Entry", + "Message", + "ErrorMessage", + } { + i := slices.IndexFunc(data, func(kv nameValue) bool { + return kv.Name == name + }) + + if i == -1 { + continue + } + + first = data[i].Value + data = slices.Delete(data, i, i+1) + break + } + + // NB: This may be empty. 
+ fmt.Fprintf(tw, "%s\t", first) + + for _, data := range data { + fmt.Fprintf(tw, "%s=%s\t", data.Name, data.Value) + } + + fmt.Fprintln(tw) + } + + return tw.Flush() +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go new file mode 100644 index 000000000..d0b5be66c --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -0,0 +1,378 @@ +package tracefs + +import ( + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + ErrInvalidInput = errors.New("invalid input") + + ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes") +) + +//go:generate go tool stringer -type=ProbeType -linecomment + +type ProbeType uint8 + +const ( + Kprobe ProbeType = iota // kprobe + Uprobe // uprobe +) + +func (pt ProbeType) eventsFile() (*os.File, error) { + path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String())) + if err != nil { + return nil, err + } + + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666) +} + +type ProbeArgs struct { + Type ProbeType + Symbol, Group, Path string + Offset, RefCtrOffset, Cookie uint64 + Pid, RetprobeMaxActive int + Ret bool +} + +// RandomGroup generates a pseudorandom string for use as a tracefs group name. +// Returns an error when the output string would exceed 63 characters (kernel +// limitation), when rand.Read() fails or when prefix contains characters not +// allowed by IsValidTraceID. +func RandomGroup(prefix string) (string, error) { + if !validIdentifier(prefix) { + return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput) + } + + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("reading random bytes: %w", err) + } + + group := fmt.Sprintf("%s_%x", prefix, b) + if len(group) > 63 { + return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput) + } + + return group, nil +} + +// validIdentifier implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_-]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set of +// characters. Non-empty, first character must not be a number or hyphen, all +// characters must be alphanumeric, underscore or hyphen. +func validIdentifier(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && (c == '-' || c >= '0' && c <= '9'): + + default: + return false + } + } + + return true +} + +func sanitizeTracefsPath(path ...string) (string, error) { + base, err := getTracefsPath() + if err != nil { + return "", err + } + l := filepath.Join(path...) + p := filepath.Join(base, l) + if !strings.HasPrefix(p, base) { + return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput) + } + return p, nil +} + +// getTracefsPath will return a correct path to the tracefs mount point. +// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing, +// but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted. +// The available tracefs paths will depends on distribution choices. 
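// Editorial note: sync.OnceValues memoizes both return values, so the
// filesystem probing below runs at most once per process; later callers such
// as sanitizeTracefsPath above simply reuse the cached mount point:
//
//	first, _ := getTracefsPath()  // stats the candidate mount points
//	second, _ := getTracefsPath() // cached: no further filesystem access
//	// first == second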
+var getTracefsPath = sync.OnceValues(func() (string, error) { + if !platform.IsLinux { + return "", fmt.Errorf("tracefs: %w", internal.ErrNotSupportedOnOS) + } + + for _, p := range []struct { + path string + fsType int64 + }{ + {"/sys/kernel/tracing", unix.TRACEFS_MAGIC}, + {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC}, + // RHEL/CentOS + {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC}, + } { + if fsType, err := linux.FSType(p.path); err == nil && fsType == p.fsType { + return p.path, nil + } + } + + return "", errors.New("neither debugfs nor tracefs are mounted") +}) + +// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore. +// +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_"). +func sanitizeIdentifier(s string) string { + var skip bool + return strings.Map(func(c rune) rune { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + return c + + case skip: + return -1 + + default: + skip = true + return '_' + } + }, s) +} + +// EventID reads a trace event's ID from tracefs given its group and name. +// The kernel requires group and name to be alphanumeric or underscore. +func EventID(group, name string) (uint64, error) { + if !validIdentifier(group) { + return 0, fmt.Errorf("invalid tracefs group: %q", group) + } + + if !validIdentifier(name) { + return 0, fmt.Errorf("invalid tracefs name: %q", name) + } + + path, err := sanitizeTracefsPath("events", group, name, "id") + if err != nil { + return 0, err + } + tid, err := internal.ReadUint64FromFile("%d\n", path) + if errors.Is(err, os.ErrNotExist) { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + } + + return tid, nil +} + +func probePrefix(ret bool, maxActive int) string { + if ret { + if maxActive > 0 { + return fmt.Sprintf("r%d", maxActive) + } + return "r" + } + return "p" +} + +// Event represents an entry in a tracefs probe events file. +type Event struct { + typ ProbeType + group, name string + // event id allocated by the kernel. 0 if the event has already been removed. + id uint64 + + cleanup runtime.Cleanup +} + +// NewEvent creates a new ephemeral trace event. +// +// Returns os.ErrNotExist if symbol is not a valid +// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist +// if a probe with the same group and symbol already exists. Returns an error if +// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if +// the kernel is too old to support kretprobe maxactive. +func NewEvent(args ProbeArgs) (*Event, error) { + // Before attempting to create a trace event through tracefs, + // check if an event with the same group and name already exists. + // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate + // entry, so we need to rely on reads for detecting uniqueness. + eventName := sanitizeIdentifier(args.Symbol) + _, err := EventID(args.Group, eventName) + if err == nil { + return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("trace event %s/%s: %w (unknown symbol?)", args.Group, eventName, err) + } + if !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err) + } + + // Open the kprobe_events file in tracefs. 
+ f, err := args.Type.eventsFile() + if err != nil { + return nil, err + } + defer f.Close() + + var pe, token string + switch args.Type { + case Kprobe: + // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): + // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe + // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy + // p:ebpf_5678/p_my_kprobe __x64_sys_execve + // + // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the + // kernel default to NR_CPUS. This is desired in most eBPF cases since + // subsampling or rate limiting logic can be more accurately implemented in + // the eBPF program itself. + // See Documentation/kprobes.txt for more details. + if args.RetprobeMaxActive != 0 && !args.Ret { + return nil, ErrInvalidMaxActive + } + token = KprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token) + case Uprobe: + // The uprobe_events syntax is as follows: + // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe + // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/readline /bin/bash:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) + // + // See Documentation/trace/uprobetracer.txt for more details. + if args.RetprobeMaxActive != 0 { + return nil, ErrInvalidMaxActive + } + token = UprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token) + } + _, err = f.WriteString(pe) + + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) + } + + if err != nil { + return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err) + } + + // Get the newly-created trace event's id. + tid, err := EventID(args.Group, eventName) + if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) { + // Kernels < 4.12 don't support maxactive and therefore auto generate + // group and event names from the symbol and offset. The symbol is used + // without any sanitization. 
+ // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712 + event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset) + if err := removeEvent(args.Type, event); err != nil { + return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err) + } + + return nil, &internal.UnsupportedFeatureError{ + MinimumVersion: internal.Version{4, 12}, + Name: "trace event with non-default maxactive", + } + } + if err != nil { + return nil, fmt.Errorf("get trace event id: %w", err) + } + + evt := &Event{typ: args.Type, group: args.Group, name: eventName, id: tid} + evt.cleanup = runtime.AddCleanup(evt, func(*byte) { + _ = removeEvent(args.Type, fmt.Sprintf("%s/%s", args.Group, eventName)) + }, nil) + + return evt, nil +} + +// Close removes the event from tracefs. +// +// Returns os.ErrClosed if the event has already been closed before. +func (evt *Event) Close() error { + if evt.id == 0 { + return os.ErrClosed + } + + evt.id = 0 + evt.cleanup.Stop() + pe := fmt.Sprintf("%s/%s", evt.group, evt.name) + return removeEvent(evt.typ, pe) +} + +func removeEvent(typ ProbeType, pe string) error { + f, err := typ.eventsFile() + if err != nil { + return err + } + defer f.Close() + + // See [k,u]probe_events syntax above. The probe type does not need to be specified + // for removals. + if _, err = f.WriteString("-:" + pe); err != nil { + return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err) + } + + return nil +} + +// ID returns the tracefs ID associated with the event. +func (evt *Event) ID() uint64 { + return evt.id +} + +// Group returns the tracefs group used by the event. +func (evt *Event) Group() string { + return evt.group +} + +// KprobeToken creates the SYM[+offs] token for the tracefs api. +func KprobeToken(args ProbeArgs) string { + po := args.Symbol + + if args.Offset != 0 { + po += fmt.Sprintf("+%#x", args.Offset) + } + + return po +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go new file mode 100644 index 000000000..ed8471a89 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT. + +package tracefs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Kprobe-0] + _ = x[Uprobe-1] +} + +const _ProbeType_name = "kprobeuprobe" + +var _ProbeType_index = [...]uint8{0, 6, 12} + +func (i ProbeType) String() string { + idx := int(i) - 0 + if i < 0 || idx >= len(_ProbeType_index)-1 { + return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProbeType_name[_ProbeType_index[idx]:_ProbeType_index[idx+1]] +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go new file mode 100644 index 000000000..994f31260 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go @@ -0,0 +1,16 @@ +package tracefs + +import "fmt" + +// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. +func UprobeToken(args ProbeArgs) string { + po := fmt.Sprintf("%s:%#x", args.Path, args.Offset) + + if args.RefCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. 
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.RefCtrOffset) + } + + return po +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/doc.go new file mode 100644 index 000000000..d168d36f1 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/doc.go @@ -0,0 +1,11 @@ +// Package unix re-exports Linux specific parts of golang.org/x/sys/unix. +// +// It avoids breaking compilation on other OS by providing stubs as follows: +// - Invoking a function always returns an error. +// - Errnos have distinct, non-zero values. +// - Constants have distinct but meaningless values. +// - Types use the same names for members, but may or may not follow the +// Linux layout. +package unix + +// Note: please don't add any custom API to this package. Use internal/sys instead. diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go new file mode 100644 index 000000000..0c4886bd1 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_linux.go @@ -0,0 +1,29 @@ +package unix + +import ( + "syscall" + + linux "golang.org/x/sys/unix" +) + +type Errno = syscall.Errno + +const ( + E2BIG = linux.E2BIG + EACCES = linux.EACCES + EAGAIN = linux.EAGAIN + EBADF = linux.EBADF + EEXIST = linux.EEXIST + EFAULT = linux.EFAULT + EILSEQ = linux.EILSEQ + EINTR = linux.EINTR + EINVAL = linux.EINVAL + ENODEV = linux.ENODEV + ENOENT = linux.ENOENT + ENOSPC = linux.ENOSPC + EOPNOTSUPP = linux.EOPNOTSUPP + EPERM = linux.EPERM + EPOLLIN = linux.EPOLLIN + ESRCH = linux.ESRCH + ESTALE = linux.ESTALE +) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go new file mode 100644 index 000000000..fc2b042b5 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_other.go @@ -0,0 +1,29 @@ +//go:build !linux && !windows + +package unix + +import "syscall" + +type Errno = syscall.Errno + +// Errnos are distinct and non-zero. +const ( + E2BIG Errno = iota + 1 + EACCES + EAGAIN + EBADF + EEXIST + EFAULT + EILSEQ + EINTR + EINVAL + ENODEV + ENOENT + ENOSPC + ENOTSUP + ENOTSUPP + EOPNOTSUPP + EPERM + ESRCH + ESTALE +) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go new file mode 100644 index 000000000..6077e983f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_string_windows.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type=Errno -tags=windows -output=errno_string_windows.go"; DO NOT EDIT. + +package unix + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[EPERM-1] + _ = x[ENOENT-2] + _ = x[ESRCH-3] + _ = x[EINTR-4] + _ = x[E2BIG-7] + _ = x[EBADF-9] + _ = x[EAGAIN-11] + _ = x[EACCES-13] + _ = x[EFAULT-14] + _ = x[EEXIST-17] + _ = x[ENODEV-19] + _ = x[EINVAL-22] + _ = x[ENOSPC-28] + _ = x[EILSEQ-42] + _ = x[ENOTSUP-129] + _ = x[EOPNOTSUPP-130] + _ = x[ENOTSUPP-536870912] + _ = x[ESTALE-536870913] +} + +const _Errno_name = "EPERMENOENTESRCHEINTRE2BIGEBADFEAGAINEACCESEFAULTEEXISTENODEVEINVALENOSPCEILSEQENOTSUPEOPNOTSUPPENOTSUPPESTALE" + +var _Errno_map = map[Errno]string{ + 1: _Errno_name[0:5], + 2: _Errno_name[5:11], + 3: _Errno_name[11:16], + 4: _Errno_name[16:21], + 7: _Errno_name[21:26], + 9: _Errno_name[26:31], + 11: _Errno_name[31:37], + 13: _Errno_name[37:43], + 14: _Errno_name[43:49], + 17: _Errno_name[49:55], + 19: _Errno_name[55:61], + 22: _Errno_name[61:67], + 28: _Errno_name[67:73], + 42: _Errno_name[73:79], + 129: _Errno_name[79:86], + 130: _Errno_name[86:96], + 536870912: _Errno_name[96:104], + 536870913: _Errno_name[104:110], +} + +func (i Errno) String() string { + if str, ok := _Errno_map[i]; ok { + return str + } + return "Errno(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go new file mode 100644 index 000000000..266e43daa --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/errno_windows.go @@ -0,0 +1,79 @@ +package unix + +// The code in this file is derived from syscall_unix.go in the Go source code, +// licensed under the MIT license. + +import ( + "errors" + "os" + "syscall" +) + +//go:generate go tool stringer -type=Errno -tags=windows -output=errno_string_windows.go + +// Windows specific constants for Unix errnos. +// +// The values do not always match Linux, for example EILSEQ and EOPNOTSUPP. +// +// See https://learn.microsoft.com/en-us/cpp/c-runtime-library/errno-constants?view=msvc-170 +const ( + EPERM Errno = 1 + ENOENT Errno = 2 + ESRCH Errno = 3 + EINTR Errno = 4 + E2BIG Errno = 7 + EBADF Errno = 9 + EAGAIN Errno = 11 + EACCES Errno = 13 + EFAULT Errno = 14 + EEXIST Errno = 17 + ENODEV Errno = 19 + EINVAL Errno = 22 + ENFILE Errno = 23 + EMFILE Errno = 24 + ENOSPC Errno = 28 + ENOSYS Errno = 40 + ENOTEMPTY Errno = 41 + EILSEQ Errno = 42 + ENOTSUP Errno = 129 + EOPNOTSUPP Errno = 130 + EOTHER Errno = 131 + ETIMEDOUT Errno = 138 + EWOULDBLOCK Errno = 140 +) + +// These constants do not exist on Windows and therefore have a non-zero +// dummy value. +const ( + ENOTSUPP Errno = Errno(syscall.APPLICATION_ERROR) + iota + ESTALE +) + +// Errno is a Windows compatibility shim for Unix errnos. 
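// With the Is method defined below, callers that import this package as
// "unix" can keep using the standard library sentinels on Windows as well
// (editorial example, results shown as comments):
//
//	errors.Is(unix.ENOENT, os.ErrNotExist)         // true
//	errors.Is(unix.EACCES, os.ErrPermission)       // true
//	errors.Is(unix.ENOTSUP, errors.ErrUnsupported) // true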
+type Errno uintptr + +func (e Errno) Error() string { + return e.String() +} + +func (e Errno) Is(target error) bool { + switch target { + case os.ErrPermission: + return e == EACCES || e == EPERM + case os.ErrExist: + return e == EEXIST || e == ENOTEMPTY + case os.ErrNotExist: + return e == ENOENT + case errors.ErrUnsupported: + return e == ENOSYS || e == ENOTSUP || e == EOPNOTSUPP + } + return false +} + +func (e Errno) Temporary() bool { + return e == EINTR || e == EMFILE || e == ENFILE || e.Timeout() +} + +func (e Errno) Timeout() bool { + return e == EAGAIN || e == EWOULDBLOCK || e == ETIMEDOUT +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/error.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/error.go new file mode 100644 index 000000000..48017c100 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/error.go @@ -0,0 +1,23 @@ +package unix + +import ( + "fmt" + "runtime" + "strings" + + "github.com/cilium/ebpf/internal" +) + +// errNonLinux returns an error which wraps [internal.ErrNotSupportedOnOS] and +// includes the name of the calling function. +func errNonLinux() error { + name := "unknown" + pc, _, _, ok := runtime.Caller(1) + if ok { + name = runtime.FuncForPC(pc).Name() + if pos := strings.LastIndexByte(name, '.'); pos != -1 { + name = name[pos+1:] + } + } + return fmt.Errorf("unix: %s: %w", name, internal.ErrNotSupportedOnOS) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go new file mode 100644 index 000000000..76f367aa8 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/strings_other.go @@ -0,0 +1,15 @@ +//go:build !linux && !windows + +package unix + +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux() +} + +func ByteSliceToString(s []byte) string { + return "" +} + +func ByteSliceFromString(s string) ([]byte, error) { + return nil, errNonLinux() +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go new file mode 100644 index 000000000..00af5a968 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/strings_windows.go @@ -0,0 +1,23 @@ +package unix + +import ( + "syscall" + + "golang.org/x/sys/windows" +) + +func BytePtrFromString(s string) (*byte, error) { + p, err := windows.BytePtrFromString(s) + if err == syscall.EINVAL { + err = EINVAL + } + return p, err +} + +func ByteSliceToString(s []byte) string { + return windows.ByteSliceToString(s) +} + +func ByteSliceFromString(s string) ([]byte, error) { + return windows.ByteSliceFromString(s) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go index 0a7c648a6..14a0a1929 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -1,197 +1,184 @@ //go:build linux -// +build linux package unix import ( "syscall" + "unsafe" linux "golang.org/x/sys/unix" ) const ( - ENOENT = linux.ENOENT - EEXIST = linux.EEXIST - EAGAIN = linux.EAGAIN - ENOSPC = linux.ENOSPC - EINVAL = linux.EINVAL - EPOLLIN = linux.EPOLLIN - EINTR = linux.EINTR - EPERM = linux.EPERM - ESRCH = linux.ESRCH - ENODEV = linux.ENODEV - EBADF = linux.EBADF - E2BIG = linux.E2BIG - EFAULT = linux.EFAULT - // ENOTSUPP is not the 
same as ENOTSUP or EOPNOTSUP - ENOTSUPP = syscall.Errno(0x20c) - - BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC - BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE - BPF_F_RDONLY = linux.BPF_F_RDONLY - BPF_F_WRONLY = linux.BPF_F_WRONLY - BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG - BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG - BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE - BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE - BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP - BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN - BPF_TAG_SIZE = linux.BPF_TAG_SIZE - BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT - BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT - BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ - SYS_BPF = linux.SYS_BPF - F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC - EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD - EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC - O_CLOEXEC = linux.O_CLOEXEC - O_NONBLOCK = linux.O_NONBLOCK - PROT_READ = linux.PROT_READ - PROT_WRITE = linux.PROT_WRITE - MAP_SHARED = linux.MAP_SHARED - PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 - PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE - PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT - PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT - PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE - PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE - PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF - PerfBitWatermark = linux.PerfBitWatermark - PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW - PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC - RLIM_INFINITY = linux.RLIM_INFINITY - RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK - BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME - PERF_RECORD_LOST = linux.PERF_RECORD_LOST - PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE - AT_FDCWD = linux.AT_FDCWD - RENAME_NOREPLACE = linux.RENAME_NOREPLACE - SO_ATTACH_BPF = linux.SO_ATTACH_BPF - SO_DETACH_BPF = linux.SO_DETACH_BPF - SOL_SOCKET = linux.SOL_SOCKET + BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE + BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN + BPF_F_LOCK = linux.BPF_F_LOCK + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF + F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_NONE = linux.PROT_NONE + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_ANON = linux.MAP_ANON + MAP_SHARED = linux.MAP_SHARED + MAP_FIXED = linux.MAP_FIXED + MAP_PRIVATE = linux.MAP_PRIVATE + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + 
PerfBitWriteBackward = linux.PerfBitWriteBackward + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET + SIGPROF = linux.SIGPROF + SIGUSR1 = linux.SIGUSR1 + SIG_BLOCK = linux.SIG_BLOCK + SIG_UNBLOCK = linux.SIG_UNBLOCK + BPF_FS_MAGIC = linux.BPF_FS_MAGIC + TRACEFS_MAGIC = linux.TRACEFS_MAGIC + DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP + AF_UNSPEC = linux.AF_UNSPEC + IFF_UP = linux.IFF_UP ) -// Statfs_t is a wrapper type Statfs_t = linux.Statfs_t - type Stat_t = linux.Stat_t - -// Rlimit is a wrapper type Rlimit = linux.Rlimit +type Signal = linux.Signal +type Sigset_t = linux.Sigset_t +type PerfEventMmapPage = linux.PerfEventMmapPage +type EpollEvent = linux.EpollEvent +type PerfEventAttr = linux.PerfEventAttr +type Utsname = linux.Utsname +type CPUSet = linux.CPUSet -// Syscall is a wrapper func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { return linux.Syscall(trap, a1, a2, a3) } -// FcntlInt is a wrapper +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return linux.PthreadSigmask(how, set, oldset) +} + func FcntlInt(fd uintptr, cmd, arg int) (int, error) { return linux.FcntlInt(fd, cmd, arg) } -// IoctlSetInt is a wrapper func IoctlSetInt(fd int, req uint, value int) error { return linux.IoctlSetInt(fd, req, value) } -// Statfs is a wrapper func Statfs(path string, buf *Statfs_t) (err error) { return linux.Statfs(path, buf) } -// Close is a wrapper func Close(fd int) (err error) { return linux.Close(fd) } -// EpollEvent is a wrapper -type EpollEvent = linux.EpollEvent - -// EpollWait is a wrapper func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { return linux.EpollWait(epfd, events, msec) } -// EpollCtl is a wrapper func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { return linux.EpollCtl(epfd, op, fd, event) } -// Eventfd is a wrapper func Eventfd(initval uint, flags int) (fd int, err error) { return linux.Eventfd(initval, flags) } -// Write is a wrapper func Write(fd int, p []byte) (n int, err error) { return linux.Write(fd, p) } -// EpollCreate1 is a wrapper func EpollCreate1(flag int) (fd int, err error) { return linux.EpollCreate1(flag) } -// PerfEventMmapPage is a wrapper -type PerfEventMmapPage linux.PerfEventMmapPage - -// SetNonblock is a wrapper func SetNonblock(fd int, nonblocking bool) (err error) { return linux.SetNonblock(fd, nonblocking) } -// Mmap is a wrapper func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { return linux.Mmap(fd, offset, length, prot, flags) } -// Munmap is a wrapper +//go:nocheckptr +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return linux.MmapPtr(fd, offset, addr, length, prot, flags) +} + func Munmap(b []byte) (err error) { return linux.Munmap(b) } -// PerfEventAttr is a wrapper -type PerfEventAttr = linux.PerfEventAttr - -// PerfEventOpen is a wrapper func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, 
flags int) (fd int, err error) { return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) } -// Utsname is a wrapper -type Utsname = linux.Utsname - -// Uname is a wrapper func Uname(buf *Utsname) (err error) { return linux.Uname(buf) } -// Getpid is a wrapper func Getpid() int { return linux.Getpid() } -// Gettid is a wrapper func Gettid() int { return linux.Gettid() } -// Tgkill is a wrapper func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { return linux.Tgkill(tgid, tid, sig) } -// BytePtrFromString is a wrapper func BytePtrFromString(s string) (*byte, error) { return linux.BytePtrFromString(s) } -// ByteSliceToString is a wrapper func ByteSliceToString(s []byte) string { return linux.ByteSliceToString(s) } -// Renameat2 is a wrapper +func ByteSliceFromString(s string) ([]byte, error) { + return linux.ByteSliceFromString(s) +} + func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) } @@ -207,3 +194,19 @@ func Open(path string, mode int, perm uint32) (int, error) { func Fstat(fd int, stat *Stat_t) error { return linux.Fstat(fd, stat) } + +func SetsockoptInt(fd, level, opt, value int) error { + return linux.SetsockoptInt(fd, level, opt, value) +} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return linux.SchedSetaffinity(pid, set) +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return linux.SchedGetaffinity(pid, set) +} + +func Auxv() ([][2]uintptr, error) { + return linux.Auxv() +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_other.go index abd8ea93d..f3f764ebe 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_other.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -1,79 +1,80 @@ //go:build !linux -// +build !linux package unix import ( - "fmt" - "runtime" "syscall" + "unsafe" ) -var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - +// Constants are distinct to avoid breaking switch statements. 
const ( - ENOENT = syscall.ENOENT - EEXIST = syscall.EEXIST - EAGAIN = syscall.EAGAIN - ENOSPC = syscall.ENOSPC - EINVAL = syscall.EINVAL - EINTR = syscall.EINTR - EPERM = syscall.EPERM - ESRCH = syscall.ESRCH - ENODEV = syscall.ENODEV - EBADF = syscall.Errno(0) - E2BIG = syscall.Errno(0) - EFAULT = syscall.EFAULT - // ENOTSUPP is not the same as ENOTSUP or EOPNOTSUP - ENOTSUPP = syscall.Errno(0x20c) - - BPF_F_NO_PREALLOC = 0 - BPF_F_NUMA_NODE = 0 - BPF_F_RDONLY = 0 - BPF_F_WRONLY = 0 - BPF_F_RDONLY_PROG = 0 - BPF_F_WRONLY_PROG = 0 - BPF_F_SLEEPABLE = 0 - BPF_F_MMAPABLE = 0 - BPF_F_INNER_MAP = 0 - BPF_OBJ_NAME_LEN = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_RINGBUF_BUSY_BIT = 0 - BPF_RINGBUF_DISCARD_BIT = 0 - BPF_RINGBUF_HDR_SZ = 0 - SYS_BPF = 321 - F_DUPFD_CLOEXEC = 0x406 - EPOLLIN = 0x1 - EPOLL_CTL_ADD = 0x1 - EPOLL_CLOEXEC = 0x80000 - O_CLOEXEC = 0x80000 - O_NONBLOCK = 0x800 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - MAP_SHARED = 0x1 - PERF_ATTR_SIZE_VER1 = 0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - PERF_EVENT_IOC_DISABLE = 0 - PERF_EVENT_IOC_ENABLE = 0 - PERF_EVENT_IOC_SET_BPF = 0 - PerfBitWatermark = 0x4000 - PERF_SAMPLE_RAW = 0x400 - PERF_FLAG_FD_CLOEXEC = 0x8 - RLIM_INFINITY = 0x7fffffffffffffff - RLIMIT_MEMLOCK = 8 - BPF_STATS_RUN_TIME = 0 - PERF_RECORD_LOST = 2 - PERF_RECORD_SAMPLE = 9 - AT_FDCWD = -0x2 - RENAME_NOREPLACE = 0x1 - SO_ATTACH_BPF = 0x32 - SO_DETACH_BPF = 0x1b - SOL_SOCKET = 0x1 + BPF_F_NO_PREALLOC = iota + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE + BPF_F_MMAPABLE + BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN + BPF_F_XDP_HAS_FRAGS + BPF_OBJ_NAME_LEN + BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ + SYS_BPF + F_DUPFD_CLOEXEC + EPOLLIN + EPOLL_CTL_ADD + EPOLL_CLOEXEC + O_CLOEXEC + O_NONBLOCK + PROT_NONE + PROT_READ + PROT_WRITE + MAP_ANON + MAP_SHARED + MAP_FIXED + MAP_PRIVATE + PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF + PerfBitWatermark + PerfBitWriteBackward + PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY + RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME + PERF_RECORD_LOST + PERF_RECORD_SAMPLE + AT_FDCWD + RENAME_NOREPLACE + SO_ATTACH_BPF + SO_DETACH_BPF + SOL_SOCKET + SIGPROF + SIGUSR1 + SIG_BLOCK + SIG_UNBLOCK + BPF_FS_MAGIC + TRACEFS_MAGIC + DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP + BPF_F_LOCK + AF_UNSPEC + IFF_UP ) -// Statfs_t is a wrapper type Statfs_t struct { Type int64 Bsize int64 @@ -89,72 +90,81 @@ type Statfs_t struct { Spare [4]int64 } -type Stat_t struct{} +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 +} -// Rlimit is a wrapper type Rlimit struct { Cur uint64 Max uint64 } -// Syscall is a wrapper -func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { - return 0, 0, syscall.Errno(1) +type Signal int + +type Sigset_t struct { + Val [4]uint64 +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { + return 0, 0, ENOTSUP +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return errNonLinux() } -// FcntlInt is a wrapper func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - return -1, errNonLinux + return -1, errNonLinux() } -// IoctlSetInt is a wrapper func IoctlSetInt(fd int, req 
uint, value int) error { - return errNonLinux + return errNonLinux() } -// Statfs is a wrapper func Statfs(path string, buf *Statfs_t) error { - return errNonLinux + return errNonLinux() } -// Close is a wrapper func Close(fd int) (err error) { - return errNonLinux + return errNonLinux() } -// EpollEvent is a wrapper type EpollEvent struct { Events uint32 Fd int32 Pad int32 } -// EpollWait is a wrapper func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } -// EpollCtl is a wrapper func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return errNonLinux + return errNonLinux() } -// Eventfd is a wrapper func Eventfd(initval uint, flags int) (fd int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } -// Write is a wrapper func Write(fd int, p []byte) (n int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } -// EpollCreate1 is a wrapper func EpollCreate1(flag int) (fd int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } -// PerfEventMmapPage is a wrapper type PerfEventMmapPage struct { Version uint32 Compat_version uint32 @@ -181,22 +191,22 @@ type PerfEventMmapPage struct { Aux_size uint64 } -// SetNonblock is a wrapper func SetNonblock(fd int, nonblocking bool) (err error) { - return errNonLinux + return errNonLinux() } -// Mmap is a wrapper func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { - return []byte{}, errNonLinux + return []byte{}, errNonLinux() +} + +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return nil, errNonLinux() } -// Munmap is a wrapper func Munmap(b []byte) (err error) { - return errNonLinux + return errNonLinux() } -// PerfEventAttr is a wrapper type PerfEventAttr struct { Type uint32 Size uint32 @@ -218,60 +228,63 @@ type PerfEventAttr struct { Sample_max_stack uint16 } -// PerfEventOpen is a wrapper func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - return 0, errNonLinux + return 0, errNonLinux() } -// Utsname is a wrapper type Utsname struct { Release [65]byte Version [65]byte } -// Uname is a wrapper func Uname(buf *Utsname) (err error) { - return errNonLinux + return errNonLinux() } -// Getpid is a wrapper func Getpid() int { return -1 } -// Gettid is a wrapper func Gettid() int { return -1 } -// Tgkill is a wrapper func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - return errNonLinux + return errNonLinux() } -// BytePtrFromString is a wrapper -func BytePtrFromString(s string) (*byte, error) { - return nil, errNonLinux -} - -// ByteSliceToString is a wrapper -func ByteSliceToString(s []byte) string { - return "" -} - -// Renameat2 is a wrapper func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { - return errNonLinux + return errNonLinux() } func Prlimit(pid, resource int, new, old *Rlimit) error { - return errNonLinux + return errNonLinux() } func Open(path string, mode int, perm uint32) (int, error) { - return -1, errNonLinux + return -1, errNonLinux() } func Fstat(fd int, stat *Stat_t) error { - return errNonLinux + return errNonLinux() +} + +func SetsockoptInt(fd, level, opt, value int) error { + return errNonLinux() +} + +type CPUSet struct{} + +func (*CPUSet) Set(int) {} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return errNonLinux() +} + +func SchedGetaffinity(pid int, 
set *CPUSet) error { + return errNonLinux() +} + +func Auxv() ([][2]uintptr, error) { + return nil, errNonLinux() } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/vdso.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/vdso.go deleted file mode 100644 index aba860163..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/vdso.go +++ /dev/null @@ -1,145 +0,0 @@ -package internal - -import ( - "debug/elf" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - - "github.com/cilium/ebpf/internal/unix" -) - -var ( - errAuxvNoVDSO = errors.New("no vdso address found in auxv") -) - -// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library -// linked into the current process image. -func vdsoVersion() (uint32, error) { - // Read data from the auxiliary vector, which is normally passed directly - // to the process. Go does not expose that data, so we must read it from procfs. - // https://man7.org/linux/man-pages/man3/getauxval.3.html - av, err := os.Open("/proc/self/auxv") - if err != nil { - return 0, fmt.Errorf("opening auxv: %w", err) - } - defer av.Close() - - vdsoAddr, err := vdsoMemoryAddress(av) - if err != nil { - return 0, fmt.Errorf("finding vDSO memory address: %w", err) - } - - // Use /proc/self/mem rather than unsafe.Pointer tricks. - mem, err := os.Open("/proc/self/mem") - if err != nil { - return 0, fmt.Errorf("opening mem: %w", err) - } - defer mem.Close() - - // Open ELF at provided memory address, as offset into /proc/self/mem. - c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) - if err != nil { - return 0, fmt.Errorf("reading linux version code: %w", err) - } - - return c, nil -} - -// vdsoMemoryAddress returns the memory address of the vDSO library -// linked into the current process image. r is an io.Reader into an auxv blob. -func vdsoMemoryAddress(r io.Reader) (uint64, error) { - const ( - _AT_NULL = 0 // End of vector - _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image - ) - - // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, - // the address of a page containing the virtual Dynamic Shared Object (vDSO). - aux := struct{ Tag, Val uint64 }{} - for { - if err := binary.Read(r, NativeEndian, &aux); err != nil { - return 0, fmt.Errorf("reading auxv entry: %w", err) - } - - switch aux.Tag { - case _AT_SYSINFO_EHDR: - if aux.Val != 0 { - return aux.Val, nil - } - return 0, fmt.Errorf("invalid vDSO address in auxv") - // _AT_NULL is always the last tag/val pair in the aux vector - // and can be treated like EOF. - case _AT_NULL: - return 0, errAuxvNoVDSO - } - } -} - -// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' -type elfNoteHeader struct { - NameSize int32 - DescSize int32 - Type int32 -} - -// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in -// the ELF notes section of the binary provided by the reader. -func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { - hdr, err := NewSafeELFFile(r) - if err != nil { - return 0, fmt.Errorf("reading vDSO ELF: %w", err) - } - - sec := hdr.SectionByType(elf.SHT_NOTE) - if sec == nil { - return 0, fmt.Errorf("no note section found in vDSO ELF") - } - - sr := sec.Open() - var n elfNoteHeader - - // Read notes until we find one named 'Linux'. 
- for { - if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { - if errors.Is(err, io.EOF) { - return 0, fmt.Errorf("no Linux note in ELF") - } - return 0, fmt.Errorf("reading note header: %w", err) - } - - // If a note name is defined, it follows the note header. - var name string - if n.NameSize > 0 { - // Read the note name, aligned to 4 bytes. - buf := make([]byte, Align(int(n.NameSize), 4)) - if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { - return 0, fmt.Errorf("reading note name: %w", err) - } - - // Read nul-terminated string. - name = unix.ByteSliceToString(buf[:n.NameSize]) - } - - // If a note descriptor is defined, it follows the name. - // It is possible for a note to have a descriptor but not a name. - if n.DescSize > 0 { - // LINUX_VERSION_CODE is a uint32 value. - if name == "Linux" && n.DescSize == 4 && n.Type == 0 { - var version uint32 - if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { - return 0, fmt.Errorf("reading note descriptor: %w", err) - } - return version, nil - } - - // Discard the note descriptor if it exists but we're not interested in it. - if _, err := io.CopyN(io.Discard, sr, int64(Align(int(n.DescSize), 4))); err != nil { - return 0, err - } - } - } -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/version.go b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/version.go index 690c55273..3123dc9f0 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/internal/version.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/internal/version.go @@ -2,7 +2,6 @@ package internal import ( "fmt" - "sync" ) const ( @@ -13,14 +12,6 @@ const ( MagicKernelVersion = 0xFFFFFFFE ) -var ( - kernelVersion = struct { - once sync.Once - version Version - err error - }{} -) - // A Version in the form Major.Minor.Patch. type Version [3]uint16 @@ -75,33 +66,9 @@ func (v Version) Kernel() uint32 { // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid // overflowing into PATCHLEVEL. // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). - s := v[2] - if s > 255 { - s = 255 - } + s := min(v[2], 255) // Truncate members to uint8 to prevent them from spilling over into // each other when overflowing 8 bits. return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) } - -// KernelVersion returns the version of the currently running kernel. -func KernelVersion() (Version, error) { - kernelVersion.once.Do(func() { - kernelVersion.version, kernelVersion.err = detectKernelVersion() - }) - - if kernelVersion.err != nil { - return Version{}, kernelVersion.err - } - return kernelVersion.version, nil -} - -// detectKernelVersion returns the version of the running kernel. -func detectKernelVersion() (Version, error) { - vc, err := vdsoVersion() - if err != nil { - return Version{}, err - } - return NewVersionFromCode(vc), nil -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/anchor.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/anchor.go new file mode 100644 index 000000000..10fbba079 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/anchor.go @@ -0,0 +1,139 @@ +//go:build !windows + +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const anchorFlags = sys.BPF_F_REPLACE | + sys.BPF_F_BEFORE | + sys.BPF_F_AFTER | + sys.BPF_F_ID | + sys.BPF_F_LINK_MPROG + +// Anchor is a reference to a link or program. 
+// +// It is used to describe where an attachment or detachment should take place +// for link types which support multiple attachment. +type Anchor interface { + // anchor returns an fd or ID and a set of flags. + // + // By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG + // changes this to refer to a link instead. + // + // BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program + // is attached. The default behaviour if none of these flags is specified + // matches BPF_F_AFTER. + anchor() (fdOrID, flags uint32, _ error) +} + +type firstAnchor struct{} + +func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_BEFORE, nil +} + +// Head is the position before all other programs or links. +func Head() Anchor { + return firstAnchor{} +} + +type lastAnchor struct{} + +func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_AFTER, nil +} + +// Tail is the position after all other programs or links. +func Tail() Anchor { + return lastAnchor{} +} + +// Before is the position just in front of target. +func BeforeLink(target Link) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLink(target Link) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. +func ReplaceProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +// Before is the position just in front of target. +func BeforeProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. 
+func ReplaceProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +type anchor struct { + target any + position uint32 +} + +func (ap anchor) anchor() (fdOrID, flags uint32, _ error) { + var typeFlag uint32 + switch target := ap.target.(type) { + case *ebpf.Program: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = 0 + case ebpf.ProgramID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_ID + case interface{ FD() int }: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = sys.BPF_F_LINK_MPROG + case ID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID + default: + return 0, 0, fmt.Errorf("invalid target %T", ap.target) + } + + return fdOrID, ap.position | typeFlag, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/cgroup.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/cgroup.go index b3e65cfec..64aea1ae3 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/cgroup.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -6,14 +8,20 @@ import ( "os" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" ) type cgroupAttachFlags uint32 -// cgroup attach flags const ( + // Allow programs attached to sub-cgroups to override the verdict of this + // program. flagAllowOverride cgroupAttachFlags = 1 << iota + // Allow attaching multiple programs to the cgroup. Only works if the cgroup + // has zero or more programs attached using the Multi flag. Implies override. flagAllowMulti + // Set automatically by progAttachCgroup.Update(). Used for updating a + // specific given program attached in multi-mode. flagReplace ) @@ -27,47 +35,45 @@ type CgroupOptions struct { } // AttachCgroup links a BPF program to a cgroup. -func AttachCgroup(opts CgroupOptions) (Link, error) { +// +// If the running kernel doesn't support bpf_link, attempts to emulate its +// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not +// available, the returned [Link] will not support pinning to bpffs. +// +// If you need more control over attachment flags or the attachment mechanism +// used, look at [RawAttachProgram] and [AttachRawLink] instead. +func AttachCgroup(opts CgroupOptions) (cg Link, err error) { cgroup, err := os.Open(opts.Path) if err != nil { return nil, fmt.Errorf("can't open cgroup: %s", err) } - - clone, err := opts.Program.Clone() - if err != nil { + defer func() { + if _, ok := cg.(*progAttachCgroup); ok { + // Skip closing the cgroup handle if we return a valid progAttachCgroup, + // where the handle is retained to implement Update(). + return + } cgroup.Close() - return nil, err + }() + + cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program) + if err == nil { + return cg, nil } - var cg Link - cg, err = newLinkCgroup(cgroup, opts.Attach, clone) if errors.Is(err, ErrNotSupported) { - cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowMulti) + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti) } if errors.Is(err, ErrNotSupported) { - cg, err = newProgAttachCgroup(cgroup, opts.Attach, clone, flagAllowOverride) + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride) } if err != nil { - cgroup.Close() - clone.Close() return nil, err } return cg, nil } -// LoadPinnedCgroup loads a pinned cgroup from a bpffs. 
-// -// Deprecated: use LoadPinnedLink instead. -func LoadPinnedCgroup(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { - link, err := LoadPinnedRawLink(fileName, CgroupType, opts) - if err != nil { - return nil, err - } - - return &linkCgroup{*link}, nil -} - type progAttachCgroup struct { cgroup *os.File current *ebpf.Program @@ -79,6 +85,8 @@ var _ Link = (*progAttachCgroup)(nil) func (cg *progAttachCgroup) isLink() {} +// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH. +// cgroup and prog are retained by [progAttachCgroup]. func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) { if flags&flagAllowMulti > 0 { if err := haveProgAttachReplace(); err != nil { @@ -86,17 +94,24 @@ func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Pro } } - err := RawAttachProgram(RawAttachProgramOptions{ + // Use a program handle that cannot be closed by the caller. + clone, err := prog.Clone() + if err != nil { + return nil, err + } + + err = RawAttachProgram(RawAttachProgramOptions{ Target: int(cgroup.Fd()), - Program: prog, + Program: clone, Flags: uint32(flags), Attach: attach, }) if err != nil { + clone.Close() return nil, fmt.Errorf("cgroup: %w", err) } - return &progAttachCgroup{cgroup, prog, attach, flags}, nil + return &progAttachCgroup{cgroup, clone, attach, flags}, nil } func (cg *progAttachCgroup) Close() error { @@ -131,8 +146,7 @@ func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { // Atomically replacing multiple programs requires at least // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf // program in MULTI mode") - args.Flags |= uint32(flagReplace) - args.Replace = cg.current + args.Anchor = ReplaceProgram(cg.current) } if err := RawAttachProgram(args); err != nil { @@ -150,7 +164,11 @@ func (cg *progAttachCgroup) Pin(string) error { } func (cg *progAttachCgroup) Unpin() error { - return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) + return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Detach() error { + return fmt.Errorf("can't detach cgroup: %w", ErrNotSupported) } func (cg *progAttachCgroup) Info() (*Info, error) { @@ -163,6 +181,7 @@ type linkCgroup struct { var _ Link = (*linkCgroup)(nil) +// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE. func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { link, err := AttachRawLink(RawLinkOptions{ Target: int(cgroup.Fd()), @@ -175,3 +194,21 @@ func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) return &linkCgroup{*link}, err } + +func (cg *linkCgroup) Info() (*Info, error) { + var info sys.CgroupLinkInfo + if err := sys.ObjInfo(cg.fd, &info); err != nil { + return nil, fmt.Errorf("cgroup link info: %s", err) + } + extra := &CgroupInfo{ + CgroupId: info.CgroupId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/iter.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/iter.go index 289733e47..40bb69c70 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/iter.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/iter.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -25,10 +27,6 @@ type IterOptions struct { // AttachIter attaches a BPF seq_file iterator. 
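For context on the reworked AttachCgroup above, which now only clones the program when falling back to the legacy PROG_ATTACH path, a minimal caller-side sketch follows. The cgroup path, attach type, and program handle are illustrative and not taken from this repository; only the returned Link needs to be closed by the caller.

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachDeviceFilter shows the caller side of AttachCgroup after this update:
// the options struct is unchanged, and cleanup is a single Close on the Link.
func attachDeviceFilter(cgroupPath string, prog *ebpf.Program) (link.Link, error) {
	return link.AttachCgroup(link.CgroupOptions{
		Path:    cgroupPath,
		Attach:  ebpf.AttachCGroupDevice,
		Program: prog,
	})
}

func main() {
	var prog *ebpf.Program // loaded elsewhere
	l, err := attachDeviceFilter("/sys/fs/cgroup/mygroup", prog)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
}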
func AttachIter(opts IterOptions) (*Iter, error) { - if err := haveBPFLink(); err != nil { - return nil, err - } - progFd := opts.Program.FD() if progFd < 0 { return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) @@ -46,30 +44,21 @@ func AttachIter(opts IterOptions) (*Iter, error) { attr := sys.LinkCreateIterAttr{ ProgFd: uint32(progFd), AttachType: sys.AttachType(ebpf.AttachTraceIter), - IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfo: sys.UnsafePointer(unsafe.Pointer(&info)), IterInfoLen: uint32(unsafe.Sizeof(info)), } fd, err := sys.LinkCreateIter(&attr) if err != nil { + if haveFeatErr := haveBPFLink(); haveFeatErr != nil { + return nil, haveFeatErr + } return nil, fmt.Errorf("can't link iterator: %w", err) } return &Iter{RawLink{fd, ""}}, err } -// LoadPinnedIter loads a pinned iterator from a bpffs. -// -// Deprecated: use LoadPinnedLink instead. -func LoadPinnedIter(fileName string, opts *ebpf.LoadPinOptions) (*Iter, error) { - link, err := LoadPinnedRawLink(fileName, IterType, opts) - if err != nil { - return nil, err - } - - return &Iter{*link}, err -} - // Iter represents an attached bpf_iter. type Iter struct { RawLink @@ -88,7 +77,7 @@ func (it *Iter) Open() (io.ReadCloser, error) { return nil, fmt.Errorf("can't create iterator: %w", err) } - return fd.File("bpf_iter"), nil + return fd.File("bpf_iter") } // union bpf_iter_link_info.map diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe.go index 6b896360d..0912e0a08 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -1,182 +1,208 @@ +//go:build !windows + package link import ( - "bytes" - "crypto/rand" "errors" "fmt" "os" - "path/filepath" "runtime" - "sync" + "strings" "unsafe" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" "github.com/cilium/ebpf/internal/unix" ) -var ( - kprobeEventsPath = filepath.Join(tracefsPath, "kprobe_events") - - kprobeRetprobeBit = struct { - once sync.Once - value uint64 - err error - }{} -) - -type probeType uint8 - -type probeArgs struct { - symbol, group, path string - offset, refCtrOffset uint64 - pid int - ret bool -} - -const ( - kprobeType probeType = iota - uprobeType -) - -func (pt probeType) String() string { - if pt == kprobeType { - return "kprobe" - } - return "uprobe" +// KprobeOptions defines additional parameters that will be used +// when loading Kprobes. +type KprobeOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Offset of the kprobe relative to the traced symbol. + // Can be used to insert kprobes at arbitrary offsets in kernel functions, + // e.g. in places where functions have been inlined. + Offset uint64 + // Increase the maximum number of concurrent invocations of a kretprobe. + // Required when tracing some long running functions in the kernel. + // + // Deprecated: this setting forces the use of an outdated kernel API and is not portable + // across kernel versions. + RetprobeMaxActive int + // Prefix used for the event name if the kprobe must be attached using tracefs. + // The group name will be formatted as `_`. + // The default empty string is equivalent to "ebpf" as the prefix. 
+ TraceFSPrefix string } -func (pt probeType) EventsPath() string { - if pt == kprobeType { - return kprobeEventsPath +func (ko *KprobeOptions) cookie() uint64 { + if ko == nil { + return 0 } - return uprobeEventsPath -} - -func (pt probeType) PerfEventType(ret bool) perfEventType { - if pt == kprobeType { - if ret { - return kretprobeEvent - } - return kprobeEvent - } - if ret { - return uretprobeEvent - } - return uprobeEvent -} - -func (pt probeType) RetprobeBit() (uint64, error) { - if pt == kprobeType { - return kretprobeBit() - } - return uretprobeBit() + return ko.Cookie } // Kprobe attaches the given eBPF program to a perf event that fires when the // given kernel symbol starts executing. See /proc/kallsyms for available // symbols. For example, printk(): // -// kp, err := Kprobe("printk", prog) +// kp, err := Kprobe("printk", prog, nil) // // Losing the reference to the resulting Link (kp) will close the Kprobe // and prevent further execution of prog. The Link must be Closed during // program shutdown to avoid leaking system resources. -func Kprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, false) +// +// If attaching to symbol fails, automatically retries with the running +// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls +// in a portable fashion. +// +// On kernels 6.11 and later, setting a kprobe on a nonexistent symbol using +// tracefs incorrectly returns [unix.EINVAL] instead of [os.ErrNotExist]. +// +// The returned Link may implement [PerfEvent]. +func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, false) if err != nil { return nil, err } - err = k.attach(prog) + lnk, err := attachPerfEvent(k, prog, opts.cookie()) if err != nil { k.Close() return nil, err } - return k, nil + return lnk, nil } // Kretprobe attaches the given eBPF program to a perf event that fires right // before the given kernel symbol exits, with the function stack left intact. // See /proc/kallsyms for available symbols. For example, printk(): // -// kp, err := Kretprobe("printk", prog) +// kp, err := Kretprobe("printk", prog, nil) // // Losing the reference to the resulting Link (kp) will close the Kretprobe // and prevent further execution of prog. The Link must be Closed during // program shutdown to avoid leaking system resources. -func Kretprobe(symbol string, prog *ebpf.Program) (Link, error) { - k, err := kprobe(symbol, prog, true) +// +// If attaching to symbol fails, automatically retries with the running +// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls +// in a portable fashion. +// +// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol +// incorrectly returns [unix.EINVAL] instead of [os.ErrNotExist]. +// +// The returned Link may implement [PerfEvent]. +func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, true) if err != nil { return nil, err } - err = k.attach(prog) + lnk, err := attachPerfEvent(k, prog, opts.cookie()) if err != nil { k.Close() return nil, err } - return k, nil + return lnk, nil +} + +// isValidKprobeSymbol implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_.]*$". 
+func isValidKprobeSymbol(s string) bool { + if len(s) < 1 { + return false + } + + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + // Allow `.` in symbol name. GCC-compiled kernel may change symbol name + // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`. + // See: https://gcc.gnu.org/gcc-10/changes.html + case i > 0 && c == '.': + + default: + return false + } + } + + return true } // kprobe opens a perf event on the given symbol and attaches prog to it. // If ret is true, create a kretprobe. -func kprobe(symbol string, prog *ebpf.Program, ret bool) (*perfEvent, error) { +func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { if symbol == "" { return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) } if prog == nil { return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) } - if !rgxTraceEvent.MatchString(symbol) { - return nil, fmt.Errorf("symbol '%s' must be alphanumeric or underscore: %w", symbol, errInvalidInput) + if !isValidKprobeSymbol(symbol) { + return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) } if prog.Type() != ebpf.Kprobe { return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) } - args := probeArgs{ - pid: perfAllThreads, - symbol: platformPrefix(symbol), - ret: ret, + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Pid: perfAllThreads, + Symbol: symbol, + Ret: ret, + } + + if opts != nil { + args.RetprobeMaxActive = opts.RetprobeMaxActive + args.Cookie = opts.Cookie + args.Offset = opts.Offset + args.Group = opts.TraceFSPrefix } // Use kprobe PMU if the kernel has it available. - tp, err := pmuKprobe(args) - if errors.Is(err, os.ErrNotExist) { - args.symbol = symbol - tp, err = pmuKprobe(args) + tp, err := pmuProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := linux.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = pmuProbe(args) + } } if err == nil { return tp, nil } - if err != nil && !errors.Is(err, ErrNotSupported) { - return nil, fmt.Errorf("creating perf_kprobe PMU: %w", err) + if !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err) } // Use tracefs if kprobe PMU is missing. - args.symbol = platformPrefix(symbol) - tp, err = tracefsKprobe(args) - if errors.Is(err, os.ErrNotExist) { - args.symbol = symbol - tp, err = tracefsKprobe(args) + args.Symbol = symbol + tp, err = tracefsProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := linux.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = tracefsProbe(args) + } } if err != nil { - return nil, fmt.Errorf("creating trace event '%s' in tracefs: %w", symbol, err) + return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err) } return tp, nil } -// pmuKprobe opens a perf event based on the kprobe PMU. -// Returns os.ErrNotExist if the given symbol does not exist in the kernel. -func pmuKprobe(args probeArgs) (*perfEvent, error) { - return pmuProbe(kprobeType, args) -} - // pmuProbe opens a perf event based on a Performance Monitoring Unit. // // Requires at least a 4.17 kernel. 
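The Kprobe and Kretprobe signatures above now take a third *KprobeOptions argument, as the updated doc comments show with Kprobe("printk", prog, nil). A self-contained sketch of that call, with the program handle assumed to be loaded elsewhere, might look like:

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachPrintk attaches an already-loaded Kprobe-type program to printk.
// Passing nil options keeps the defaults; a *KprobeOptions value could set
// Offset or TraceFSPrefix instead.
func attachPrintk(prog *ebpf.Program) (link.Link, error) {
	kp, err := link.Kprobe("printk", prog, nil)
	if err != nil {
		return nil, err
	}
	// kp must be Closed during shutdown to release the perf event.
	return kp, nil
}

func main() {
	var prog *ebpf.Program // loading elided; see the ebpf package docs
	kp, err := attachPrintk(prog)
	if err != nil {
		log.Fatal(err)
	}
	defer kp.Close()
}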
@@ -184,17 +210,25 @@ func pmuKprobe(args probeArgs) (*perfEvent, error) { // 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" // // Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU -func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { +func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) { // Getting the PMU type will fail if the kernel doesn't support // the perf_[k,u]probe PMU. - et, err := getPMUEventType(typ) + eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type") + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported) + } if err != nil { return nil, err } + // Use tracefs if we want to set kretprobe's retprobeMaxActive. + if args.RetprobeMaxActive != 0 { + return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported) + } + var config uint64 - if args.ret { - bit, err := typ.RetprobeBit() + if args.Ret { + bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe") if err != nil { return nil, err } @@ -202,60 +236,85 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { } var ( - attr unix.PerfEventAttr - sp unsafe.Pointer + attr unix.PerfEventAttr + sp unsafe.Pointer + token string ) - switch typ { - case kprobeType: + switch args.Type { + case tracefs.Kprobe: // Create a pointer to a NUL-terminated string for the kernel. - sp, err = unsafeStringPtr(args.symbol) + sp, err = unsafeStringPtr(args.Symbol) if err != nil { return nil, err } + token = tracefs.KprobeToken(args) + attr = unix.PerfEventAttr{ - Type: uint32(et), // PMU event type read from sysfs + // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. Use Ext2 as probe_offset. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Ext2: args.Offset, // Kernel symbol offset Config: config, // Retprobe flag } - case uprobeType: - sp, err = unsafeStringPtr(args.path) + case tracefs.Uprobe: + sp, err = unsafeStringPtr(args.Path) if err != nil { return nil, err } - if args.refCtrOffset != 0 { - config |= args.refCtrOffset << uprobeRefCtrOffsetShift + if args.RefCtrOffset != 0 { + config |= args.RefCtrOffset << uprobeRefCtrOffsetShift } + token = tracefs.UprobeToken(args) + attr = unix.PerfEventAttr{ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, // since it added the config2 (Ext2) field. The Size field controls the // size of the internal buffer the kernel allocates for reading the // perf_event_attr argument from userspace. Size: unix.PERF_ATTR_SIZE_VER1, - Type: uint32(et), // PMU event type read from sysfs + Type: uint32(eventType), // PMU event type read from sysfs Ext1: uint64(uintptr(sp)), // Uprobe path - Ext2: args.offset, // Uprobe offset + Ext2: args.Offset, // Uprobe offset Config: config, // RefCtrOffset, Retprobe flag } } - rawFd, err := unix.PerfEventOpen(&attr, args.pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + cpu := 0 + if args.Pid != perfAllThreads { + cpu = -1 + } + rawFd, err := unix.PerfEventOpen(&attr, args.Pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and + // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs. 
+ // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 + if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") { + return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported) + } // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL - // when trying to create a kretprobe for a missing symbol. Make sure ENOENT - // is returned to the caller. - if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return nil, fmt.Errorf("symbol '%s' not found: %w", args.symbol, os.ErrNotExist) + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, unix.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) } // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned // when attempting to set a uprobe on a trap instruction. - if errors.Is(err, unix.ENOTSUPP) { - return nil, fmt.Errorf("failed setting uprobe on offset %#x (possible trap insn): %w", args.offset, err) + if errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err) } + if err != nil { - return nil, fmt.Errorf("opening perf event: %w", err) + return nil, fmt.Errorf("token %s: opening perf event: %w", token, err) } // Ensure the string pointer is not collected before PerfEventOpen returns. @@ -267,17 +326,7 @@ func pmuProbe(typ probeType, args probeArgs) (*perfEvent, error) { } // Kernel has perf_[k,u]probe PMU available, initialize perf event. - return &perfEvent{ - fd: fd, - pmuID: et, - name: args.symbol, - typ: typ.PerfEventType(args.ret), - }, nil -} - -// tracefsKprobe creates a Kprobe tracefs entry. -func tracefsKprobe(args probeArgs) (*perfEvent, error) { - return tracefsProbe(kprobeType, args) + return newPerfEvent(fd, nil), nil } // tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. @@ -286,184 +335,37 @@ func tracefsKprobe(args probeArgs) (*perfEvent, error) { // Path and offset are only set in the case of uprobe(s) and are used to set // the executable/library path on the filesystem and the offset where the probe is inserted. // A perf event is then opened on the newly-created trace event and returned to the caller. -func tracefsProbe(typ probeType, args probeArgs) (*perfEvent, error) { +func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + groupPrefix := "ebpf" + if args.Group != "" { + groupPrefix = args.Group + } + // Generate a random string for each trace event we attempt to create. // This value is used as the 'group' token in tracefs to allow creating // multiple kprobe trace events with the same name. - group, err := randomGroup("ebpf") + group, err := tracefs.RandomGroup(groupPrefix) if err != nil { return nil, fmt.Errorf("randomizing group name: %w", err) } - args.group = group - - // Before attempting to create a trace event through tracefs, - // check if an event with the same group and name already exists. - // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate - // entry, so we need to rely on reads for detecting uniqueness. 
- _, err = getTraceEventID(group, args.symbol) - if err == nil { - return nil, fmt.Errorf("trace event already exists: %s/%s", group, args.symbol) - } - if err != nil && !errors.Is(err, os.ErrNotExist) { - return nil, fmt.Errorf("checking trace event %s/%s: %w", group, args.symbol, err) - } + args.Group = group // Create the [k,u]probe trace event using tracefs. - if err := createTraceFSProbeEvent(typ, args); err != nil { - return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) - } - - // Get the newly-created trace event's id. - tid, err := getTraceEventID(group, args.symbol) + evt, err := tracefs.NewEvent(args) if err != nil { - return nil, fmt.Errorf("getting trace event id: %w", err) + return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) } // Kprobes are ephemeral tracepoints and share the same perf event type. - fd, err := openTracepointPerfEvent(tid, args.pid) + fd, err := openTracepointPerfEvent(evt.ID(), args.Pid) if err != nil { + // Make sure we clean up the created tracefs event when we return error. + // If a livepatch handler is already active on the symbol, the write to + // tracefs will succeed, a trace event will show up, but creating the + // perf event will fail with EBUSY. + _ = evt.Close() return nil, err } - return &perfEvent{ - fd: fd, - group: group, - name: args.symbol, - tracefsID: tid, - typ: typ.PerfEventType(args.ret), - }, nil -} - -// createTraceFSProbeEvent creates a new ephemeral trace event by writing to -// /[k,u]probe_events. Returns os.ErrNotExist if symbol is not a valid -// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist -// if a probe with the same group and symbol already exists. -func createTraceFSProbeEvent(typ probeType, args probeArgs) error { - // Open the kprobe_events file in tracefs. - f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) - if err != nil { - return fmt.Errorf("error opening '%s': %w", typ.EventsPath(), err) - } - defer f.Close() - - var pe string - switch typ { - case kprobeType: - // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): - // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe - // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe - // -:[GRP/]EVENT : Clear a probe - // - // Some examples: - // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy - // p:ebpf_5678/p_my_kprobe __x64_sys_execve - // - // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the - // kernel default to NR_CPUS. This is desired in most eBPF cases since - // subsampling or rate limiting logic can be more accurately implemented in - // the eBPF program itself. - // See Documentation/kprobes.txt for more details. - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, args.symbol) - case uprobeType: - // The uprobe_events syntax is as follows: - // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe - // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe - // -:[GRP/]EVENT : Clear a probe - // - // Some examples: - // r:ebpf_1234/readline /bin/bash:0x12345 - // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) - // - // See Documentation/trace/uprobetracer.txt for more details. - pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.ret), args.group, args.symbol, uprobeToken(args)) - } - _, err = f.WriteString(pe) - // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL - // when trying to create a kretprobe for a missing symbol. 
Make sure ENOENT - // is returned to the caller. - if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { - return fmt.Errorf("symbol %s not found: %w", args.symbol, os.ErrNotExist) - } - if err != nil { - return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) - } - - return nil -} - -// closeTraceFSProbeEvent removes the [k,u]probe with the given type, group and symbol -// from /[k,u]probe_events. -func closeTraceFSProbeEvent(typ probeType, group, symbol string) error { - f, err := os.OpenFile(typ.EventsPath(), os.O_APPEND|os.O_WRONLY, 0666) - if err != nil { - return fmt.Errorf("error opening %s: %w", typ.EventsPath(), err) - } - defer f.Close() - - // See [k,u]probe_events syntax above. The probe type does not need to be specified - // for removals. - pe := fmt.Sprintf("-:%s/%s", group, symbol) - if _, err = f.WriteString(pe); err != nil { - return fmt.Errorf("writing '%s' to '%s': %w", pe, typ.EventsPath(), err) - } - - return nil -} - -// randomGroup generates a pseudorandom string for use as a tracefs group name. -// Returns an error when the output string would exceed 63 characters (kernel -// limitation), when rand.Read() fails or when prefix contains characters not -// allowed by rgxTraceEvent. -func randomGroup(prefix string) (string, error) { - if !rgxTraceEvent.MatchString(prefix) { - return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, errInvalidInput) - } - - b := make([]byte, 8) - if _, err := rand.Read(b); err != nil { - return "", fmt.Errorf("reading random bytes: %w", err) - } - - group := fmt.Sprintf("%s_%x", prefix, b) - if len(group) > 63 { - return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, errInvalidInput) - } - - return group, nil -} - -func probePrefix(ret bool) string { - if ret { - return "r" - } - return "p" -} - -// determineRetprobeBit reads a Performance Monitoring Unit's retprobe bit -// from /sys/bus/event_source/devices//format/retprobe. -func determineRetprobeBit(typ probeType) (uint64, error) { - p := filepath.Join("/sys/bus/event_source/devices/", typ.String(), "/format/retprobe") - - data, err := os.ReadFile(p) - if err != nil { - return 0, err - } - - var rp uint64 - n, err := fmt.Sscanf(string(bytes.TrimSpace(data)), "config:%d", &rp) - if err != nil { - return 0, fmt.Errorf("parse retprobe bit: %w", err) - } - if n != 1 { - return 0, fmt.Errorf("parse retprobe bit: expected 1 item, got %d", n) - } - - return rp, nil -} - -func kretprobeBit() (uint64, error) { - kprobeRetprobeBit.once.Do(func() { - kprobeRetprobeBit.value, kprobeRetprobeBit.err = determineRetprobeBit(kprobeType) - }) - return kprobeRetprobeBit.value, kprobeRetprobeBit.err + return newPerfEvent(fd, evt), nil } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe_multi.go new file mode 100644 index 000000000..3a2b06a24 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/kprobe_multi.go @@ -0,0 +1,256 @@ +//go:build !windows + +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// KprobeMultiOptions defines additional parameters that will be used +// when opening a KprobeMulti Link. +type KprobeMultiOptions struct { + // Symbols takes a list of kernel symbol names to attach an ebpf program to. 
+ // + // Mutually exclusive with Addresses. + Symbols []string + + // Addresses takes a list of kernel symbol addresses in case they can not + // be referred to by name. + // + // Note that only start addresses can be specified, since the fprobe API + // limits the attach point to the function entry or return. + // + // Mutually exclusive with Symbols. + Addresses []uintptr + + // Cookies specifies arbitrary values that can be fetched from an eBPF + // program via `bpf_get_attach_cookie()`. + // + // If set, its length should be equal to the length of Symbols or Addresses. + // Each Cookie is assigned to the Symbol or Address specified at the + // corresponding slice index. + Cookies []uint64 + + // Session must be true when attaching Programs with the + // [ebpf.AttachTraceKprobeSession] attach type. + // + // This makes a Kprobe execute on both function entry and return. The entry + // program can share a cookie value with the return program and can decide + // whether the return program gets executed. + Session bool +} + +// KprobeMulti attaches the given eBPF program to the entry point of a given set +// of kernel symbols. +// +// The difference with Kprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. +func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, 0) +} + +// KretprobeMulti attaches the given eBPF program to the return point of a given +// set of kernel symbols. +// +// The difference with Kretprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. 
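A caller-side sketch of the KprobeMulti call defined above: one system call attaches the program to every listed symbol. The symbol names here are illustrative, and the program is assumed to have been loaded with the AttachTraceKprobeMulti attach type, as the error text and feature probe below indicate.

package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

// attachMany attaches prog to several kernel symbols in a single syscall.
func attachMany(prog *ebpf.Program) (link.Link, error) {
	return link.KprobeMulti(prog, link.KprobeMultiOptions{
		Symbols: []string{"vprintk", "printk"},
	})
}

func main() {
	var prog *ebpf.Program // loaded elsewhere with ebpf.AttachTraceKprobeMulti
	l, err := attachMany(prog)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
}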
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, sys.BPF_F_KPROBE_MULTI_RETURN) +} + +func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + syms := uint32(len(opts.Symbols)) + addrs := uint32(len(opts.Addresses)) + cookies := uint32(len(opts.Cookies)) + + if syms == 0 && addrs == 0 { + return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput) + } + if syms != 0 && addrs != 0 { + return nil, fmt.Errorf("fields Symbols and Addresses are mutually exclusive: %w", errInvalidInput) + } + if cookies > 0 && cookies != syms && cookies != addrs { + return nil, fmt.Errorf("field Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput) + } + + attachType := sys.BPF_TRACE_KPROBE_MULTI + if opts.Session { + attachType = sys.BPF_TRACE_KPROBE_SESSION + } + + attr := &sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: attachType, + KprobeMultiFlags: flags, + } + + switch { + case syms != 0: + attr.Count = syms + attr.Syms = sys.NewStringSlicePointer(opts.Symbols) + + case addrs != 0: + attr.Count = addrs + attr.Addrs = sys.SlicePointer(opts.Addresses) + } + + if cookies != 0 { + attr.Cookies = sys.SlicePointer(opts.Cookies) + } + + fd, err := sys.LinkCreateKprobeMulti(attr) + if err == nil { + return &kprobeMultiLink{RawLink{fd, ""}}, nil + } + + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist) + } + + if opts.Session { + if haveFeatErr := haveBPFLinkKprobeSession(); haveFeatErr != nil { + return nil, haveFeatErr + } + } else { + if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + } + + // Check EINVAL after running feature probes, since it's also returned when + // the kernel doesn't support the multi/session attach types. + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not %s?)", err, ebpf.AttachType(attachType)) + } + + return nil, err +} + +type kprobeMultiLink struct { + RawLink +} + +var _ Link = (*kprobeMultiLink)(nil) + +func (kml *kprobeMultiLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported) +} + +func (kml *kprobeMultiLink) Info() (*Info, error) { + var info sys.KprobeMultiLinkInfo + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("kprobe multi link info: %s", err) + } + extra := &KprobeMultiInfo{ + count: info.Count, + flags: info.Flags, + missed: info.Missed, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kpm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. 
+ return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + // If CONFIG_FPROBE isn't set. + case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}, "5.18") + +var haveBPFLinkKprobeSession = internal.NewFeatureTest("bpf_link_kprobe_session", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kps_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeSession, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_SESSION, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + // If CONFIG_FPROBE isn't set. + case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}, "6.10") diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/link.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/link.go index 3aa49a68e..aaff08a15 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/link.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/link.go @@ -1,16 +1,20 @@ package link import ( - "bytes" - "encoding/binary" + "errors" "fmt" + "os" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" ) +// Type is the kind of link. +type Type = sys.LinkType + var ErrNotSupported = internal.ErrNotSupported // Link represents a Program attached to a BPF hook. @@ -37,6 +41,11 @@ type Link interface { // not called. Close() error + // Detach the link from its corresponding attachment point. + // + // May return an error wrapping ErrNotSupported. + Detach() error + // Info returns metadata on a link. // // May return an error wrapping ErrNotSupported. @@ -46,40 +55,48 @@ type Link interface { isLink() } -// LoadPinnedLink loads a link that was persisted into a bpffs. -func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { - raw, err := loadPinnedRawLink(fileName, opts) +// NewLinkFromFD creates a link from a raw fd. +// +// Deprecated: use [NewFromFD] instead. +func NewLinkFromFD(fd int) (Link, error) { + return NewFromFD(fd) +} + +// NewFromFD creates a link from a raw fd. +// +// You should not use fd after calling this function. +func NewFromFD(fd int) (Link, error) { + sysFD, err := sys.NewFD(fd) if err != nil { return nil, err } - return wrapRawLink(raw) + return wrapRawLink(&RawLink{fd: sysFD}) } -// wrap a RawLink in a more specific type if possible. +// NewFromID returns the link associated with the given id. // -// The function takes ownership of raw and closes it on error. 
-func wrapRawLink(raw *RawLink) (Link, error) { - info, err := raw.Info() +// Returns ErrNotExist if there is no link with the given id. +func NewFromID(id ID) (Link, error) { + getFdAttr := &sys.LinkGetFdByIdAttr{Id: id} + fd, err := sys.LinkGetFdById(getFdAttr) if err != nil { - raw.Close() - return nil, err + return nil, fmt.Errorf("get link fd from ID %d: %w", id, err) } - switch info.Type { - case RawTracepointType: - return &rawTracepoint{*raw}, nil - case TracingType: - return &tracing{*raw}, nil - case CgroupType: - return &linkCgroup{*raw}, nil - case IterType: - return &Iter{*raw}, nil - case NetNsType: - return &NetNsLink{*raw}, nil - default: - return raw, nil + return wrapRawLink(&RawLink{fd, ""}) +} + +// LoadPinnedLink loads a Link from a pin (file) on the BPF virtual filesystem. +// +// Requires at least Linux 5.7. +func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err } + + return wrapRawLink(raw) } // ID uniquely identifies a BPF link. @@ -107,48 +124,6 @@ type Info struct { extra interface{} } -// RawLinkInfo contains information on a raw link. -// -// Deprecated: use Info instead. -type RawLinkInfo = Info - -type TracingInfo sys.TracingLinkInfo -type CgroupInfo sys.CgroupLinkInfo -type NetNsInfo sys.NetNsLinkInfo -type XDPInfo sys.XDPLinkInfo - -// Tracing returns tracing type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) Tracing() *TracingInfo { - e, _ := r.extra.(*TracingInfo) - return e -} - -// Cgroup returns cgroup type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) Cgroup() *CgroupInfo { - e, _ := r.extra.(*CgroupInfo) - return e -} - -// NetNs returns netns type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) NetNs() *NetNsInfo { - e, _ := r.extra.(*NetNsInfo) - return e -} - -// ExtraNetNs returns XDP type-specific link info. -// -// Returns nil if the type-specific link info isn't available. -func (r Info) XDP() *XDPInfo { - e, _ := r.extra.(*XDPInfo) - return e -} - // RawLink is the low-level API to bpf_link. // // You should consider using the higher level interfaces in this @@ -158,68 +133,8 @@ type RawLink struct { pinnedPath string } -// AttachRawLink creates a raw link. -func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { - if err := haveBPFLink(); err != nil { - return nil, err - } - - if opts.Target < 0 { - return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) - } - - progFd := opts.Program.FD() - if progFd < 0 { - return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) - } - - attr := sys.LinkCreateAttr{ - TargetFd: uint32(opts.Target), - ProgFd: uint32(progFd), - AttachType: sys.AttachType(opts.Attach), - TargetBtfId: uint32(opts.BTF), - Flags: opts.Flags, - } - fd, err := sys.LinkCreate(&attr) - if err != nil { - return nil, fmt.Errorf("can't create link: %s", err) - } - - return &RawLink{fd, ""}, nil -} - -// LoadPinnedRawLink loads a persisted link from a bpffs. -// -// Returns an error if the pinned link type doesn't match linkType. Pass -// UnspecifiedType to disable this behaviour. -// -// Deprecated: use LoadPinnedLink instead. 
-func LoadPinnedRawLink(fileName string, linkType Type, opts *ebpf.LoadPinOptions) (*RawLink, error) { - link, err := loadPinnedRawLink(fileName, opts) - if err != nil { - return nil, err - } - - if linkType == UnspecifiedType { - return link, nil - } - - info, err := link.Info() - if err != nil { - link.Close() - return nil, fmt.Errorf("get pinned link info: %w", err) - } - - if info.Type != linkType { - link.Close() - return nil, fmt.Errorf("link type %v doesn't match %v", info.Type, linkType) - } - - return link, nil -} - func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { - fd, err := sys.ObjGet(&sys.ObjGetAttr{ + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ Pathname: sys.NewStringPointer(fileName), FileFlags: opts.Marshal(), }) @@ -227,6 +142,11 @@ func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, er return nil, fmt.Errorf("load pinned link: %w", err) } + if typ != sys.BPF_TYPE_LINK { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Link", fileName) + } + return &RawLink{fd, fileName}, nil } @@ -249,7 +169,7 @@ func (l *RawLink) Close() error { // Calling Close on a pinned Link will not break the link // until the pin is removed. func (l *RawLink) Pin(fileName string) error { - if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil { + if err := sys.Pin(l.pinnedPath, fileName, l.fd); err != nil { return err } l.pinnedPath = fileName @@ -258,13 +178,18 @@ func (l *RawLink) Pin(fileName string) error { // Unpin implements the Link interface. func (l *RawLink) Unpin() error { - if err := internal.Unpin(l.pinnedPath); err != nil { + if err := sys.Unpin(l.pinnedPath); err != nil { return err } l.pinnedPath = "" return nil } +// IsPinned returns true if the Link has a non-empty pinned path. +func (l *RawLink) IsPinned() bool { + return l.pinnedPath != "" +} + // Update implements the Link interface. func (l *RawLink) Update(new *ebpf.Program) error { return l.UpdateArgs(RawLinkUpdateOptions{ @@ -300,10 +225,34 @@ func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { OldProgFd: uint32(oldFd), Flags: opts.Flags, } - return sys.LinkUpdate(&attr) + if err := sys.LinkUpdate(&attr); err != nil { + return fmt.Errorf("update link: %w", err) + } + return nil +} + +// Detach the link from its corresponding attachment point. +func (l *RawLink) Detach() error { + attr := sys.LinkDetachAttr{ + LinkFd: l.fd.Uint(), + } + + err := sys.LinkDetach(&attr) + + switch { + case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return fmt.Errorf("detach link: %w", err) + default: + return nil + } } // Info returns metadata about the link. +// +// Linktype specific metadata is not included and can be retrieved +// via the linktype specific Info() method. 
func (l *RawLink) Info() (*Info, error) { var info sys.LinkInfo @@ -311,36 +260,81 @@ func (l *RawLink) Info() (*Info, error) { return nil, fmt.Errorf("link info: %s", err) } - var extra interface{} - switch info.Type { - case CgroupType: - extra = &CgroupInfo{} - case IterType: - // not supported - case NetNsType: - extra = &NetNsInfo{} - case RawTracepointType: - // not supported - case TracingType: - extra = &TracingInfo{} - case XDPType: - extra = &XDPInfo{} - default: - return nil, fmt.Errorf("unknown link info type: %d", info.Type) - } - - if info.Type != RawTracepointType && info.Type != IterType { - buf := bytes.NewReader(info.Extra[:]) - err := binary.Read(buf, internal.NativeEndian, extra) - if err != nil { - return nil, fmt.Errorf("can not read extra link info: %w", err) - } - } - return &Info{ info.Type, info.Id, ebpf.ProgramID(info.ProgId), - extra, + nil, }, nil } + +// Iterator allows iterating over links attached into the kernel. +type Iterator struct { + // The ID of the current link. Only valid after a call to Next + ID ID + // The current link. Only valid until a call to Next. + // See Take if you want to retain the link. + Link Link + err error +} + +// Next retrieves the next link. +// +// Returns true if another link was found. Call [Iterator.Err] after the function returns false. +func (it *Iterator) Next() bool { + id := it.ID + for { + getIdAttr := &sys.LinkGetNextIdAttr{Id: id} + err := sys.LinkGetNextId(getIdAttr) + if errors.Is(err, os.ErrNotExist) { + // There are no more links. + break + } else if err != nil { + it.err = fmt.Errorf("get next link ID: %w", err) + break + } + + id = getIdAttr.NextId + l, err := NewFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Couldn't load the link fast enough. Try next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("get link for ID %d: %w", id, err) + break + } + + if it.Link != nil { + it.Link.Close() + } + it.ID, it.Link = id, l + return true + } + + // No more links or we encountered an error. + if it.Link != nil { + it.Link.Close() + } + it.Link = nil + return false +} + +// Take the ownership of the current link. +// +// It's the callers responsibility to close the link. +func (it *Iterator) Take() Link { + l := it.Link + it.Link = nil + return l +} + +// Err returns an error if iteration failed for some reason. +func (it *Iterator) Err() error { + return it.err +} + +func (it *Iterator) Close() { + if it.Link != nil { + it.Link.Close() + } +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/link_other.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/link_other.go new file mode 100644 index 000000000..cd9452fd8 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/link_other.go @@ -0,0 +1,260 @@ +//go:build !windows + +package link + +import ( + "fmt" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +// Valid link types. 
+const ( + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP + PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT + KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI + TCXType = sys.BPF_LINK_TYPE_TCX + UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI + NetfilterType = sys.BPF_LINK_TYPE_NETFILTER + NetkitType = sys.BPF_LINK_TYPE_NETKIT +) + +// AttachRawLink creates a raw link. +func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + if opts.Target < 0 { + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + p, attachType := platform.DecodeConstant(opts.Attach) + if p != platform.Linux { + return nil, fmt.Errorf("attach type %s: %w", opts.Attach, internal.ErrNotSupportedOnOS) + } + + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(attachType), + TargetBtfId: opts.BTF, + Flags: opts.Flags, + } + fd, err := sys.LinkCreate(&attr) + if err != nil { + return nil, fmt.Errorf("create link: %w", err) + } + + return &RawLink{fd, ""}, nil +} + +// wrap a RawLink in a more specific type if possible. +// +// The function takes ownership of raw and closes it on error. +func wrapRawLink(raw *RawLink) (_ Link, err error) { + defer func() { + if err != nil { + raw.Close() + } + }() + + info, err := raw.Info() + if err != nil { + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + case KprobeMultiType: + return &kprobeMultiLink{*raw}, nil + case UprobeMultiType: + return &uprobeMultiLink{*raw}, nil + case PerfEventType: + return &perfEventLink{*raw, nil}, nil + case TCXType: + return &tcxLink{*raw}, nil + case NetfilterType: + return &netfilterLink{*raw}, nil + case NetkitType: + return &netkitLink{*raw}, nil + case XDPType: + return &xdpLink{*raw}, nil + default: + return raw, nil + } +} + +type TracingInfo struct { + AttachType sys.AttachType + TargetObjId uint32 + TargetBtfId sys.TypeID +} + +type CgroupInfo struct { + CgroupId uint64 + AttachType sys.AttachType + _ [4]byte +} + +type NetNsInfo struct { + NetnsIno uint32 + AttachType sys.AttachType +} + +type TCXInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type XDPInfo struct { + Ifindex uint32 +} + +type NetfilterInfo struct { + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 +} + +type NetkitInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type KprobeMultiInfo struct { + count uint32 + flags uint32 + missed uint64 +} + +// AddressCount is the number of addresses hooked by the kprobe. 
+func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { + return kpm.count, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { + return kpm.flags, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { + return kpm.missed, kpm.count > 0 +} + +type PerfEventInfo struct { + Type sys.PerfEventType + extra interface{} +} + +func (r *PerfEventInfo) Kprobe() *KprobeInfo { + e, _ := r.extra.(*KprobeInfo) + return e +} + +type KprobeInfo struct { + address uint64 + missed uint64 +} + +func (kp *KprobeInfo) Address() (uint64, bool) { + return kp.address, kp.address > 0 +} + +func (kp *KprobeInfo) Missed() (uint64, bool) { + return kp.missed, kp.address > 0 +} + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// XDP returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e +} + +// TCX returns TCX type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) TCX() *TCXInfo { + e, _ := r.extra.(*TCXInfo) + return e +} + +// Netfilter returns netfilter type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netfilter() *NetfilterInfo { + e, _ := r.extra.(*NetfilterInfo) + return e +} + +// Netkit returns netkit type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netkit() *NetkitInfo { + e, _ := r.extra.(*NetkitInfo) + return e +} + +// KprobeMulti returns kprobe-multi type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) KprobeMulti() *KprobeMultiInfo { + e, _ := r.extra.(*KprobeMultiInfo) + return e +} + +// PerfEvent returns perf-event type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) PerfEvent() *PerfEventInfo { + e, _ := r.extra.(*PerfEventInfo) + return e +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/link_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/link_windows.go new file mode 100644 index 000000000..d9c6f8890 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/link_windows.go @@ -0,0 +1,48 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" +) + +// AttachRawLink creates a raw link. 
+func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if opts.Target != 0 || opts.BTF != 0 || opts.Flags != 0 { + return nil, fmt.Errorf("specified option(s) %w", internal.ErrNotSupportedOnOS) + } + + plat, attachType := platform.DecodeConstant(opts.Attach) + if plat != platform.Windows { + return nil, fmt.Errorf("attach type %s: %w", opts.Attach, internal.ErrNotSupportedOnOS) + } + + attachTypeGUID, err := efw.EbpfGetEbpfAttachType(attachType) + if err != nil { + return nil, fmt.Errorf("get attach type: %w", err) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + raw, err := efw.EbpfProgramAttachFds(progFd, attachTypeGUID, nil, 0) + if err != nil { + return nil, fmt.Errorf("attach link: %w", err) + } + + fd, err := sys.NewFD(int(raw)) + if err != nil { + return nil, err + } + + return &RawLink{fd: fd}, nil +} + +func wrapRawLink(raw *RawLink) (Link, error) { + return raw, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/netfilter.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/netfilter.go new file mode 100644 index 000000000..90e914c51 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/netfilter.go @@ -0,0 +1,92 @@ +//go:build !windows + +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmentation + +type NetfilterAttachFlags uint32 + +type NetfilterOptions struct { + // Program must be a netfilter BPF program. + Program *ebpf.Program + // The protocol family. + ProtocolFamily uint32 + // The number of the hook you are interested in. + HookNumber uint32 + // Priority within hook + Priority int32 + // Extra link flags + Flags uint32 + // Netfilter flags + NetfilterFlags NetfilterAttachFlags +} + +type netfilterLink struct { + RawLink +} + +// AttachNetfilter links a netfilter BPF program to a netfilter hook. 
+func AttachNetfilter(opts NetfilterOptions) (Link, error) { + if opts.Program == nil { + return nil, fmt.Errorf("netfilter program is nil") + } + + if t := opts.Program.Type(); t != ebpf.Netfilter { + return nil, fmt.Errorf("invalid program type %s, expected netfilter", t) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateNetfilterAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.BPF_NETFILTER, + Flags: opts.Flags, + Pf: uint32(opts.ProtocolFamily), + Hooknum: uint32(opts.HookNumber), + Priority: opts.Priority, + NetfilterFlags: uint32(opts.NetfilterFlags), + } + + fd, err := sys.LinkCreateNetfilter(&attr) + if err != nil { + return nil, fmt.Errorf("attach netfilter link: %w", err) + } + + return &netfilterLink{RawLink{fd, ""}}, nil +} + +func (*netfilterLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("netfilter update: %w", ErrNotSupported) +} + +func (nf *netfilterLink) Info() (*Info, error) { + var info sys.NetfilterLinkInfo + if err := sys.ObjInfo(nf.fd, &info); err != nil { + return nil, fmt.Errorf("netfilter link info: %s", err) + } + extra := &NetfilterInfo{ + Pf: info.Pf, + Hooknum: info.Hooknum, + Priority: info.Priority, + Flags: info.Flags, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var _ Link = (*netfilterLink)(nil) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/netkit.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/netkit.go new file mode 100644 index 000000000..5e6a321af --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/netkit.go @@ -0,0 +1,91 @@ +//go:build !windows + +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type NetkitOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachNetkit* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachNetkit(opts NetkitOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateNetkitAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach netkit link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateNetkit(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveNetkit(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach netkit link: %w", err) + } + + return &netkitLink{RawLink{fd, ""}}, nil +} + +type netkitLink struct { + RawLink +} + +var _ Link = (*netkitLink)(nil) + +func (netkit *netkitLink) Info() (*Info, error) { + var info sys.NetkitLinkInfo + if err := sys.ObjInfo(netkit.fd, &info); err != nil { + return nil, fmt.Errorf("netkit link info: %s", err) + } + extra := &NetkitInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/netns.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/netns.go index f49cbe4d7..a9f7ee79c 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/netns.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/netns.go @@ -1,9 +1,12 @@ +//go:build !windows + package link import ( "fmt" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" ) // NetNsLink is a program attached to a network namespace. @@ -35,14 +38,20 @@ func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { return &NetNsLink{*link}, nil } -// LoadPinnedNetNs loads a network namespace link from bpffs. -// -// Deprecated: use LoadPinnedLink instead. 
-func LoadPinnedNetNs(fileName string, opts *ebpf.LoadPinOptions) (*NetNsLink, error) { - link, err := LoadPinnedRawLink(fileName, NetNsType, opts) - if err != nil { - return nil, err +func (ns *NetNsLink) Info() (*Info, error) { + var info sys.NetNsLinkInfo + if err := sys.ObjInfo(ns.fd, &info); err != nil { + return nil, fmt.Errorf("netns link info: %s", err) + } + extra := &NetNsInfo{ + NetnsIno: info.NetnsIno, + AttachType: info.AttachType, } - return &NetNsLink{*link}, nil + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/perf_event.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/perf_event.go index ef24660f4..22c78ed92 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/perf_event.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -1,19 +1,18 @@ +//go:build !windows + package link import ( - "bytes" "errors" "fmt" "os" - "path/filepath" - "regexp" - "runtime" - "strconv" - "strings" "unsafe" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" "github.com/cilium/ebpf/internal/unix" ) @@ -41,60 +40,138 @@ import ( // stops any further invocations of the attached eBPF program. var ( - tracefsPath = "/sys/kernel/debug/tracing" - - // Trace event groups, names and kernel symbols must adhere to this set - // of characters. Non-empty, first character must not be a number, all - // characters must be alphanumeric or underscore. - rgxTraceEvent = regexp.MustCompile("^[a-zA-Z_][0-9a-zA-Z_]*$") - - errInvalidInput = errors.New("invalid input") + errInvalidInput = tracefs.ErrInvalidInput ) const ( perfAllThreads = -1 ) -type perfEventType uint8 - -const ( - tracepointEvent perfEventType = iota - kprobeEvent - kretprobeEvent - uprobeEvent - uretprobeEvent -) - // A perfEvent represents a perf event kernel object. Exactly one eBPF program // can be attached to it. It is created based on a tracefs trace event or a // Performance Monitoring Unit (PMU). type perfEvent struct { + // Trace event backing this perfEvent. May be nil. + tracefsEvent *tracefs.Event + + // This is the perf event FD. + fd *sys.FD +} + +func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent { + pe := &perfEvent{event, fd} + return pe +} + +func (pe *perfEvent) Close() error { + // We close the perf event before attempting to remove the tracefs event. + if err := pe.fd.Close(); err != nil { + return fmt.Errorf("closing perf event fd: %w", err) + } - // Group and name of the tracepoint/kprobe/uprobe. - group string - name string + if pe.tracefsEvent != nil { + return pe.tracefsEvent.Close() + } - // PMU event ID read from sysfs. Valid IDs are non-zero. - pmuID uint64 - // ID of the trace event read from tracefs. Valid IDs are non-zero. - tracefsID uint64 + return nil +} - // The event type determines the types of programs that can be attached. - typ perfEventType +// PerfEvent is implemented by some Link types which use a perf event under +// the hood. +type PerfEvent interface { + // PerfEvent returns a file for the underlying perf event. + // + // It is the callers responsibility to close the returned file. + // + // Making changes to the associated perf event lead to + // undefined behaviour. + PerfEvent() (*os.File, error) +} - fd *sys.FD +// perfEventLink represents a bpf perf link. 
+type perfEventLink struct { + RawLink + pe *perfEvent +} + +func (pl *perfEventLink) isLink() {} + +func (pl *perfEventLink) Close() error { + if err := pl.fd.Close(); err != nil { + return fmt.Errorf("perf link close: %w", err) + } + + // when created from pinned link + if pl.pe == nil { + return nil + } + + if err := pl.pe.Close(); err != nil { + return fmt.Errorf("perf event close: %w", err) + } + return nil +} + +func (pl *perfEventLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("perf event link update: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventLink)(nil) + +func (pl *perfEventLink) PerfEvent() (*os.File, error) { + // when created from pinned link + if pl.pe == nil { + return nil, ErrNotSupported + } + + fd, err := pl.pe.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event") } -func (pe *perfEvent) isLink() {} +func (pl *perfEventLink) Info() (*Info, error) { + var info sys.PerfEventLinkInfo + if err := sys.ObjInfo(pl.fd, &info); err != nil { + return nil, fmt.Errorf("perf event link info: %s", err) + } -func (pe *perfEvent) Pin(string) error { - return fmt.Errorf("pin perf event: %w", ErrNotSupported) + var extra2 interface{} + switch info.PerfEventType { + case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE: + var kprobeInfo sys.KprobeLinkInfo + if err := sys.ObjInfo(pl.fd, &kprobeInfo); err != nil { + return nil, fmt.Errorf("kprobe link info: %s", err) + } + extra2 = &KprobeInfo{ + address: kprobeInfo.Addr, + missed: kprobeInfo.Missed, + } + } + + extra := &PerfEventInfo{ + Type: info.PerfEventType, + extra: extra2, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil } -func (pe *perfEvent) Unpin() error { - return fmt.Errorf("unpin perf event: %w", ErrNotSupported) +// perfEventIoctl implements Link and handles the perf event lifecycle +// via ioctl(). +type perfEventIoctl struct { + *perfEvent } +func (pi *perfEventIoctl) isLink() {} + // Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), // calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array // owned by the perf event, which means multiple programs can be attached @@ -105,129 +182,98 @@ func (pe *perfEvent) Unpin() error { // // Detaching a program from a perf event is currently not possible, so a // program replacement mechanism cannot be implemented for perf events. 
-func (pe *perfEvent) Update(prog *ebpf.Program) error { - return fmt.Errorf("can't replace eBPF program in perf event: %w", ErrNotSupported) +func (pi *perfEventIoctl) Update(_ *ebpf.Program) error { + return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) } -func (pe *perfEvent) Info() (*Info, error) { - return nil, fmt.Errorf("can't get perf event info: %w", ErrNotSupported) +func (pi *perfEventIoctl) Pin(string) error { + return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) } -func (pe *perfEvent) Close() error { - if pe.fd == nil { - return nil - } +func (pi *perfEventIoctl) Unpin() error { + return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) +} - err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_DISABLE, 0) - if err != nil { - return fmt.Errorf("disabling perf event: %w", err) - } +func (pi *perfEventIoctl) Detach() error { + return fmt.Errorf("perf event ioctl detach: %w", ErrNotSupported) +} - err = pe.fd.Close() - if err != nil { - return fmt.Errorf("closing perf event fd: %w", err) - } +func (pi *perfEventIoctl) Info() (*Info, error) { + return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) +} - switch pe.typ { - case kprobeEvent, kretprobeEvent: - // Clean up kprobe tracefs entry. - if pe.tracefsID != 0 { - return closeTraceFSProbeEvent(kprobeType, pe.group, pe.name) - } - case uprobeEvent, uretprobeEvent: - // Clean up uprobe tracefs entry. - if pe.tracefsID != 0 { - return closeTraceFSProbeEvent(uprobeType, pe.group, pe.name) - } - case tracepointEvent: - // Tracepoint trace events don't hold any extra resources. - return nil +var _ PerfEvent = (*perfEventIoctl)(nil) + +func (pi *perfEventIoctl) PerfEvent() (*os.File, error) { + fd, err := pi.fd.Dup() + if err != nil { + return nil, err } - return nil + return fd.File("perf-event") } // attach the given eBPF prog to the perf event stored in pe. // pe must contain a valid perf event fd. // prog's type must match the program type stored in pe. -func (pe *perfEvent) attach(prog *ebpf.Program) error { +func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) { if prog == nil { - return errors.New("cannot attach a nil program") - } - if pe.fd == nil { - return errors.New("cannot attach to nil perf event") + return nil, errors.New("cannot attach a nil program") } if prog.FD() < 0 { - return fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) } - switch pe.typ { - case kprobeEvent, kretprobeEvent, uprobeEvent, uretprobeEvent: - if t := prog.Type(); t != ebpf.Kprobe { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.Kprobe, t) - } - case tracepointEvent: - if t := prog.Type(); t != ebpf.TracePoint { - return fmt.Errorf("invalid program type (expected %s): %s", ebpf.TracePoint, t) - } - default: - return fmt.Errorf("unknown perf event type: %d", pe.typ) + + if err := haveBPFLinkPerfEvent(); err == nil { + return attachPerfEventLink(pe, prog, cookie) } - kfd := pe.fd.Int() + if cookie != 0 { + return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) + } + + return attachPerfEventIoctl(pe, prog) +} +func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { // Assign the eBPF program to the perf event. 
- err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) if err != nil { - return fmt.Errorf("setting perf event bpf program: %w", err) + return nil, fmt.Errorf("setting perf event bpf program: %w", err) } // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. - if err := unix.IoctlSetInt(int(kfd), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { - return fmt.Errorf("enable perf event: %s", err) + if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return nil, fmt.Errorf("enable perf event: %s", err) } - // Close the perf event when its reference is lost to avoid leaking system resources. - runtime.SetFinalizer(pe, (*perfEvent).Close) - return nil -} - -// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. -func unsafeStringPtr(str string) (unsafe.Pointer, error) { - p, err := unix.BytePtrFromString(str) - if err != nil { - return nil, err - } - return unsafe.Pointer(p), nil + return &perfEventIoctl{pe}, nil } -// getTraceEventID reads a trace event's ID from tracefs given its group and name. -// group and name must be alphanumeric or underscore, as required by the kernel. -func getTraceEventID(group, name string) (uint64, error) { - tid, err := uint64FromFile(tracefsPath, "events", group, name, "id") - if errors.Is(err, os.ErrNotExist) { - return 0, fmt.Errorf("trace event %s/%s: %w", group, name, os.ErrNotExist) - } +// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). +// +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) { + fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + TargetFd: pe.fd.Uint(), + AttachType: sys.BPF_PERF_EVENT, + BpfCookie: cookie, + }) if err != nil { - return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + return nil, fmt.Errorf("cannot create bpf perf link: %v", err) } - return tid, nil + return &perfEventLink{RawLink{fd: fd}, pe}, nil } -// getPMUEventType reads a Performance Monitoring Unit's type (numeric identifier) -// from /sys/bus/event_source/devices//type. -// -// Returns ErrNotSupported if the pmu type is not supported. -func getPMUEventType(typ probeType) (uint64, error) { - et, err := uint64FromFile("/sys/bus/event_source/devices", typ.String(), "type") - if errors.Is(err, os.ErrNotExist) { - return 0, fmt.Errorf("pmu type %s: %w", typ, ErrNotSupported) - } +// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. +func unsafeStringPtr(str string) (unsafe.Pointer, error) { + p, err := unix.BytePtrFromString(str) if err != nil { - return 0, fmt.Errorf("reading pmu type %s: %w", typ, err) + return nil, err } - - return et, nil + return unsafe.Pointer(p), nil } // openTracepointPerfEvent opens a tracepoint-type perf event. 
System-wide @@ -242,7 +288,11 @@ func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { Wakeup: 1, } - fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC) + cpu := 0 + if pid != perfAllThreads { + cpu = -1 + } + fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) if err != nil { return nil, fmt.Errorf("opening tracepoint perf event: %w", err) } @@ -250,21 +300,34 @@ func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { return sys.NewFD(fd) } -// uint64FromFile reads a uint64 from a file. All elements of path are sanitized -// and joined onto base. Returns error if base no longer prefixes the path after -// joining all components. -func uint64FromFile(base string, path ...string) (uint64, error) { - l := filepath.Join(path...) - p := filepath.Join(base, l) - if !strings.HasPrefix(p, base) { - return 0, fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, errInvalidInput) - } - - data, err := os.ReadFile(p) +// Probe BPF perf link. +// +// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_bpf_perf_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "MIT", + }) if err != nil { - return 0, fmt.Errorf("reading file %s: %w", p, err) + return err } - - et := bytes.TrimSpace(data) - return strconv.ParseUint(string(et), 10, 64) -} + defer prog.Close() + + _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_PERF_EVENT, + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}, "5.15") diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/platform.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/platform.go deleted file mode 100644 index eb6f7b7a3..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/platform.go +++ /dev/null @@ -1,25 +0,0 @@ -package link - -import ( - "fmt" - "runtime" -) - -func platformPrefix(symbol string) string { - - prefix := runtime.GOARCH - - // per https://github.com/golang/go/blob/master/src/go/build/syslist.go - switch prefix { - case "386": - prefix = "ia32" - case "amd64", "amd64p32": - prefix = "x64" - case "arm64", "arm64be": - prefix = "arm64" - default: - return symbol - } - - return fmt.Sprintf("__%s_%s", prefix, symbol) -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/program.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/program.go index ea3181737..dbd7a9727 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/program.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/program.go @@ -1,23 +1,30 @@ +//go:build !windows + package link import ( "fmt" + "runtime" "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal/sys" ) type RawAttachProgramOptions struct { - // File descriptor to attach to. This differs for each attach type. + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. Target int // Program to attach. Program *ebpf.Program - // Program to replace (cgroups). - Replace *ebpf.Program - // Attach must match the attach type of Program (and Replace). 
+ // Attach must match the attach type of Program. Attach ebpf.AttachType - // Flags control the attach behaviour. This differs for each attach type. + // Attach relative to an anchor. Optional. + Anchor Anchor + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional. Flags uint32 + // Only attach if the internal revision matches the given value. + ExpectedRevision uint64 } // RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. @@ -25,50 +32,76 @@ type RawAttachProgramOptions struct { // You should use one of the higher level abstractions available in this // package if possible. func RawAttachProgram(opts RawAttachProgramOptions) error { - if err := haveProgAttach(); err != nil { - return err + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") } - var replaceFd uint32 - if opts.Replace != nil { - replaceFd = uint32(opts.Replace.FD()) + attr := sys.ProgAttachAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + AttachFlags: uint32(opts.Flags), + ExpectedRevision: opts.ExpectedRevision, } - attr := sys.ProgAttachAttr{ - TargetFd: uint32(opts.Target), - AttachBpfFd: uint32(opts.Program.FD()), - ReplaceBpfFd: replaceFd, - AttachType: uint32(opts.Attach), - AttachFlags: uint32(opts.Flags), + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("attach program: %w", err) + } + + if flags == sys.BPF_F_REPLACE { + // Ensure that replacing a program works on old kernels. + attr.ReplaceBpfFd = fdOrID + } else { + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } } if err := sys.ProgAttach(&attr); err != nil { - return fmt.Errorf("can't attach program: %w", err) + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } + return fmt.Errorf("attach program: %w", err) } + runtime.KeepAlive(opts.Program) + return nil } -type RawDetachProgramOptions struct { - Target int - Program *ebpf.Program - Attach ebpf.AttachType -} +type RawDetachProgramOptions RawAttachProgramOptions // RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. // // You should use one of the higher level abstractions available in this // package if possible. 
func RawDetachProgram(opts RawDetachProgramOptions) error { - if err := haveProgAttach(); err != nil { - return err + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") } attr := sys.ProgDetachAttr{ - TargetFd: uint32(opts.Target), - AttachBpfFd: uint32(opts.Program.FD()), - AttachType: uint32(opts.Attach), + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + ExpectedRevision: opts.ExpectedRevision, } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("detach program: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + if err := sys.ProgDetach(&attr); err != nil { + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } return fmt.Errorf("can't detach program: %w", err) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/query.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/query.go new file mode 100644 index 000000000..eeca82811 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/query.go @@ -0,0 +1,112 @@ +//go:build !windows + +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// QueryOptions defines additional parameters when querying for programs. +type QueryOptions struct { + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. + Target int + // Attach specifies the AttachType of the programs queried for + Attach ebpf.AttachType + // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE + QueryFlags uint32 +} + +// QueryResult describes which programs and links are active. +type QueryResult struct { + // List of attached programs. + Programs []AttachedProgram + + // Incremented by one every time the set of attached programs changes. + // May be zero if not supported by the [ebpf.AttachType]. + Revision uint64 +} + +// HaveLinkInfo returns true if the kernel supports querying link information +// for a particular [ebpf.AttachType]. +func (qr *QueryResult) HaveLinkInfo() bool { + return qr.Revision > 0 +} + +type AttachedProgram struct { + ID ebpf.ProgramID + linkID ID +} + +// LinkID returns the ID associated with the program. +// +// Returns 0, false if the kernel doesn't support retrieving the ID or if the +// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you +// need to tell the two apart. +func (ap *AttachedProgram) LinkID() (ID, bool) { + return ap.linkID, ap.linkID != 0 +} + +// QueryPrograms retrieves a list of programs for the given AttachType. +// +// Returns a slice of attached programs, which may be empty. +// revision counts how many times the set of attached programs has changed and +// may be zero if not supported by the [ebpf.AttachType]. 
+// Returns ErrNotSupportd on a kernel without BPF_PROG_QUERY +func QueryPrograms(opts QueryOptions) (*QueryResult, error) { + // query the number of programs to allocate correct slice size + attr := sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + } + err := sys.ProgQuery(&attr) + if err != nil { + if haveFeatErr := haveProgQuery(); haveFeatErr != nil { + return nil, fmt.Errorf("query programs: %w", haveFeatErr) + } + return nil, fmt.Errorf("query programs: %w", err) + } + if attr.Count == 0 { + return &QueryResult{Revision: attr.Revision}, nil + } + + // The minimum bpf_mprog revision is 1, so we can use the field to detect + // whether the attach type supports link ids. + haveLinkIDs := attr.Revision != 0 + + count := attr.Count + progIds := make([]ebpf.ProgramID, count) + attr = sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + Count: count, + ProgIds: sys.SlicePointer(progIds), + } + + var linkIds []ID + if haveLinkIDs { + linkIds = make([]ID, count) + attr.LinkIds = sys.SlicePointer(linkIds) + } + + if err := sys.ProgQuery(&attr); err != nil { + return nil, fmt.Errorf("query programs: %w", err) + } + + // NB: attr.Count might have changed between the two syscalls. + var programs []AttachedProgram + for i, id := range progIds[:attr.Count] { + ap := AttachedProgram{ID: id} + if haveLinkIDs { + ap.linkID = linkIds[i] + } + programs = append(programs, ap) + } + + return &QueryResult{programs, attr.Revision}, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go index 925e621cb..60e667a0c 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -72,6 +74,10 @@ func (frt *simpleRawTracepoint) Unpin() error { return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) } +func (frt *simpleRawTracepoint) Detach() error { + return fmt.Errorf("detach raw_tracepoint: %w", ErrNotSupported) +} + func (frt *simpleRawTracepoint) Info() (*Info, error) { return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/socket_filter.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/socket_filter.go index 94f3958cc..8399f0231 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/socket_filter.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -15,7 +17,7 @@ func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { } var ssoErr error err = rawConn.Control(func(fd uintptr) { - ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) }) if ssoErr != nil { return ssoErr @@ -31,7 +33,7 @@ func DetachSocketFilter(conn syscall.Conn) error { } var ssoErr error err = rawConn.Control(func(fd uintptr) { - ssoErr = syscall.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) }) if ssoErr != nil { return ssoErr diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/syscalls.go 
b/src/nvcgo/vendor/github.com/cilium/ebpf/link/syscalls.go index 072dfade2..9948dead4 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/syscalls.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -10,25 +12,10 @@ import ( "github.com/cilium/ebpf/internal/unix" ) -// Type is the kind of link. -type Type = sys.LinkType - -// Valid link types. -const ( - UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC - RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT - TracingType = sys.BPF_LINK_TYPE_TRACING - CgroupType = sys.BPF_LINK_TYPE_CGROUP - IterType = sys.BPF_LINK_TYPE_ITER - NetNsType = sys.BPF_LINK_TYPE_NETNS - XDPType = sys.BPF_LINK_TYPE_XDP -) - -var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() error { +var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", func() error { prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ - Type: ebpf.CGroupSKB, - AttachType: ebpf.AttachCGroupInetIngress, - License: "MIT", + Type: ebpf.CGroupSKB, + License: "MIT", Instructions: asm.Instructions{ asm.Mov.Imm(asm.R0, 0), asm.Return(), @@ -43,9 +30,9 @@ var haveProgAttach = internal.FeatureTest("BPF_PROG_ATTACH", "4.10", func() erro // have the syscall. prog.Close() return nil -}) +}, "4.10") -var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replacement", "5.5", func() error { +var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", func() error { if err := haveProgAttach(); err != nil { return err } @@ -59,9 +46,11 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace asm.Return(), }, }) + if err != nil { return internal.ErrNotSupported } + defer prog.Close() // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. @@ -69,10 +58,10 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace // present. attr := sys.ProgAttachAttr{ // We rely on this being checked after attachFlags. - TargetFd: ^uint32(0), - AttachBpfFd: uint32(prog.FD()), - AttachType: uint32(ebpf.AttachCGroupInetIngress), - AttachFlags: uint32(flagReplace), + TargetFdOrIfindex: ^uint32(0), + AttachBpfFd: uint32(prog.FD()), + AttachType: uint32(ebpf.AttachCGroupInetIngress), + AttachFlags: uint32(flagReplace), } err = sys.ProgAttach(&attr) @@ -83,9 +72,9 @@ var haveProgAttachReplace = internal.FeatureTest("BPF_PROG_ATTACH atomic replace return nil } return err -}) +}, "5.5") -var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { +var haveBPFLink = internal.NewFeatureTest("bpf_link", func() error { attr := sys.LinkCreateAttr{ // This is a hopefully invalid file descriptor, which triggers EBADF. TargetFd: ^uint32(0), @@ -100,4 +89,94 @@ var haveBPFLink = internal.FeatureTest("bpf_link", "5.7", func() error { return nil } return err -}) +}, "5.7") + +var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", func() error { + attr := sys.ProgQueryAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect EBADF here + // as an indication that the feature is present. 
+ TargetFdOrIfindex: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + } + + err := sys.ProgQuery(&attr) + + if errors.Is(err, unix.EBADF) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}, "4.15") + +var haveTCX = internal.NewFeatureTest("tcx", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateTcxAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachTCXIngress), + } + + _, err = sys.LinkCreateTcx(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}, "6.6") + +var haveNetkit = internal.NewFeatureTest("netkit", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateNetkitAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachNetkitPrimary), + } + + _, err = sys.LinkCreateNetkit(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}, "6.7") diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/tcx.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/tcx.go new file mode 100644 index 000000000..8661018ec --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/tcx.go @@ -0,0 +1,91 @@ +//go:build !windows + +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type TCXOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachTCX* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachTCX(opts TCXOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateTcxAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateTcx(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveTCX(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + return &tcxLink{RawLink{fd, ""}}, nil +} + +type tcxLink struct { + RawLink +} + +var _ Link = (*tcxLink)(nil) + +func (tcx *tcxLink) Info() (*Info, error) { + var info sys.TcxLinkInfo + if err := sys.ObjInfo(tcx.fd, &info); err != nil { + return nil, fmt.Errorf("tcx link info: %s", err) + } + extra := &TCXInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracepoint.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracepoint.go index 7423df86b..514961ebe 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracepoint.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -1,17 +1,30 @@ +//go:build !windows + package link import ( "fmt" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/tracefs" ) +// TracepointOptions defines additional parameters that will be used +// when loading Tracepoints. +type TracepointOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 +} + // Tracepoint attaches the given eBPF program to the tracepoint with the given -// group and name. See /sys/kernel/debug/tracing/events to find available +// group and name. See /sys/kernel/tracing/events to find available // tracepoints. The top-level directory is the group, the event's subdirectory // is the name. Example: // -// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog) +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) // // Losing the reference to the resulting Link (tp) will close the Tracepoint // and prevent further execution of prog. The Link must be Closed during @@ -19,21 +32,20 @@ import ( // // Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is // only possible as of kernel 4.14 (commit cf5f5ce). -func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { +// +// The returned Link may implement [PerfEvent]. 
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { if group == "" || name == "" { return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) } if prog == nil { return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) } - if !rgxTraceEvent.MatchString(group) || !rgxTraceEvent.MatchString(name) { - return nil, fmt.Errorf("group and name '%s/%s' must be alphanumeric or underscore: %w", group, name, errInvalidInput) - } if prog.Type() != ebpf.TracePoint { return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) } - tid, err := getTraceEventID(group, name) + tid, err := tracefs.EventID(group, name) if err != nil { return nil, err } @@ -43,18 +55,18 @@ func Tracepoint(group, name string, prog *ebpf.Program) (Link, error) { return nil, err } - pe := &perfEvent{ - fd: fd, - tracefsID: tid, - group: group, - name: name, - typ: tracepointEvent, + var cookie uint64 + if opts != nil { + cookie = opts.Cookie } - if err := pe.attach(prog); err != nil { + pe := newPerfEvent(fd, nil) + + lnk, err := attachPerfEvent(pe, prog, cookie) + if err != nil { pe.Close() return nil, err } - return pe, nil + return lnk, nil } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracing.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracing.go index 5913592c6..b33b3dc0e 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracing.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/tracing.go @@ -1,21 +1,44 @@ +//go:build !windows + package link import ( + "errors" "fmt" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" ) type tracing struct { RawLink } -func (f *tracing) Update(new *ebpf.Program) error { +func (f *tracing) Update(_ *ebpf.Program) error { return fmt.Errorf("tracing update: %w", ErrNotSupported) } +func (f *tracing) Info() (*Info, error) { + var info sys.TracingLinkInfo + if err := sys.ObjInfo(f.fd, &info); err != nil { + return nil, fmt.Errorf("tracing link info: %s", err) + } + extra := &TracingInfo{ + TargetObjId: info.TargetObjId, + TargetBtfId: info.TargetBtfId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + // AttachFreplace attaches the given eBPF program to the function it replaces. 
// // The program and name can either be provided at link time, or can be provided @@ -41,27 +64,27 @@ func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) ( typeID btf.TypeID ) if targetProg != nil { - info, err := targetProg.Info() + btfHandle, err := targetProg.Handle() if err != nil { return nil, err } - btfID, ok := info.BTFID() - if !ok { - return nil, fmt.Errorf("could not get BTF ID for program %s: %w", info.Name, errInvalidInput) - } - btfHandle, err := btf.NewHandleFromID(btfID) + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) if err != nil { return nil, err } - defer btfHandle.Close() var function *btf.Func - if err := btfHandle.Spec().TypeByName(name, &function); err != nil { + if err := spec.TypeByName(name, &function); err != nil { return nil, err } target = targetProg.FD() - typeID = function.ID() + typeID, err = spec.TypeID(function) + if err != nil { + return nil, err + } } link, err := AttachRawLink(RawLinkOptions{ @@ -70,23 +93,15 @@ func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) ( Attach: ebpf.AttachNone, BTF: typeID, }) - if err != nil { - return nil, err + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. + return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported) } - - return &tracing{*link}, nil -} - -// LoadPinnedFreplace loads a pinned iterator from a bpffs. -// -// Deprecated: use LoadPinnedLink instead. -func LoadPinnedFreplace(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { - link, err := LoadPinnedRawLink(fileName, TracingType, opts) if err != nil { return nil, err } - return &tracing{*link}, err + return &tracing{*link}, nil } type TracingOptions struct { @@ -94,25 +109,71 @@ type TracingOptions struct { // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or // AttachTraceRawTp. Program *ebpf.Program + // Program attach type. Can be one of: + // - AttachTraceFEntry + // - AttachTraceFExit + // - AttachModifyReturn + // - AttachTraceRawTp + // This field is optional. + AttachType ebpf.AttachType + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + Cookie uint64 } type LSMOptions struct { // Program must be of type LSM with attach type // AttachLSMMac. Program *ebpf.Program + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + Cookie uint64 } // attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id. 
-func attachBTFID(program *ebpf.Program) (Link, error) { +func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) { if program.FD() < 0 { return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) } - fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ - ProgFd: uint32(program.FD()), - }) - if err != nil { - return nil, err + var ( + fd *sys.FD + err error + ) + switch at { + case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp, + ebpf.AttachModifyReturn, ebpf.AttachLSMMac: + // Attach via BPF link + fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{ + ProgFd: uint32(program.FD()), + AttachType: sys.AttachType(at), + Cookie: cookie, + }) + if err == nil { + break + } + if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("create tracing link: %w", err) + } + fallthrough + case ebpf.AttachNone: + // Attach via RawTracepointOpen + if cookie > 0 { + return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported) + } + + fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + ProgFd: uint32(program.FD()), + }) + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. + return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("create raw tracepoint: %w", err) + } + default: + return nil, fmt.Errorf("invalid attach type: %s", at.String()) } raw := RawLink{fd: fd} @@ -127,8 +188,7 @@ func attachBTFID(program *ebpf.Program) (Link, error) { // a raw_tracepoint link. Other types return a tracing link. return &rawTracepoint{raw}, nil } - - return &tracing{RawLink: RawLink{fd: fd}}, nil + return &tracing{raw}, nil } // AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or @@ -139,7 +199,14 @@ func AttachTracing(opts TracingOptions) (Link, error) { return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) } - return attachBTFID(opts.Program) + switch opts.AttachType { + case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn, + ebpf.AttachTraceRawTp, ebpf.AttachNone: + default: + return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String()) + } + + return attachBTFID(opts.Program, opts.AttachType, opts.Cookie) } // AttachLSM links a Linux security module (LSM) BPF Program to a BPF @@ -149,5 +216,5 @@ func AttachLSM(opts LSMOptions) (Link, error) { return nil, fmt.Errorf("invalid program type %s, expected LSM", t) } - return attachBTFID(opts.Program) + return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe.go index d603575ca..d20997e9d 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -1,3 +1,5 @@ +//go:build !windows + package link import ( @@ -5,37 +7,27 @@ import ( "errors" "fmt" "os" - "path/filepath" - "regexp" "sync" "github.com/cilium/ebpf" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/tracefs" ) var ( - uprobeEventsPath = filepath.Join(tracefsPath, "uprobe_events") - - // rgxUprobeSymbol is used to strip invalid characters from the uprobe symbol - // as they are not allowed to be used as the EVENT token in tracefs. 
- rgxUprobeSymbol = regexp.MustCompile("[^a-zA-Z0-9]+") - - uprobeRetprobeBit = struct { - once sync.Once - value uint64 - err error - }{} - uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 uprobeRefCtrOffsetShift = 32 - haveRefCtrOffsetPMU = internal.FeatureTest("RefCtrOffsetPMU", "4.20", func() error { + haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", func() error { _, err := os.Stat(uprobeRefCtrOffsetPMUPath) - if err != nil { + if errors.Is(err, os.ErrNotExist) { return internal.ErrNotSupported } + if err != nil { + return err + } return nil - }) + }, "4.20") // ErrNoSymbol indicates that the given symbol was not found // in the ELF symbols table. @@ -46,15 +38,23 @@ var ( type Executable struct { // Path of the executable on the filesystem. path string - // Parsed ELF symbols and dynamic symbols offsets. - offsets map[string]uint64 + // Parsed ELF and dynamic symbols' cachedAddresses. + cachedAddresses map[string]uint64 + // Keep track of symbol table lazy load. + cacheAddressesOnce sync.Once } // UprobeOptions defines additional parameters that will be used // when loading Uprobes. type UprobeOptions struct { - // Symbol offset. Must be provided in case of external symbols (shared libs). - // If set, overrides the offset eventually parsed from the executable. + // Symbol address. Must be provided in case of external symbols (shared libs). + // If set, overrides the address eventually parsed from the executable. + Address uint64 + // The offset relative to given symbol. Useful when tracing an arbitrary point + // inside the frame of given symbol. + // + // Note: this field changed from being an absolute offset to being relative + // to Address. Offset uint64 // Only set the uprobe on the given process ID. Useful when tracing // shared library calls or programs that have many running instances. @@ -70,11 +70,27 @@ type UprobeOptions struct { // github.com/torvalds/linux/commit/1cc33161a83d // github.com/torvalds/linux/commit/a6ca88b241d5 RefCtrOffset uint64 + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Prefix used for the event name if the uprobe must be attached using tracefs. + // The group name will be formatted as `_`. + // The default empty string is equivalent to "ebpf" as the prefix. + TraceFSPrefix string +} + +func (uo *UprobeOptions) cookie() uint64 { + if uo == nil { + return 0 + } + return uo.Cookie } // To open a new Executable, use: // -// OpenExecutable("/bin/bash") +// OpenExecutable("/bin/bash") // // The returned value can then be used to open Uprobe(s). func OpenExecutable(path string) (*Executable, error) { @@ -82,32 +98,21 @@ func OpenExecutable(path string) (*Executable, error) { return nil, fmt.Errorf("path cannot be empty") } - f, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("open file '%s': %w", path, err) - } - defer f.Close() - - se, err := internal.NewSafeELFFile(f) + f, err := internal.OpenSafeELFFile(path) if err != nil { return nil, fmt.Errorf("parse ELF file: %w", err) } + defer f.Close() - if se.Type != elf.ET_EXEC && se.Type != elf.ET_DYN { + if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN { // ELF is not an executable or a shared object. 
return nil, errors.New("the given file is not an executable or a shared object") } - ex := Executable{ - path: path, - offsets: make(map[string]uint64), - } - - if err := ex.load(se); err != nil { - return nil, err - } - - return &ex, nil + return &Executable{ + path: path, + cachedAddresses: make(map[string]uint64), + }, nil } func (ex *Executable) load(f *internal.SafeELFFile) error { @@ -129,7 +134,7 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { continue } - off := s.Value + address := s.Value // Loop over ELF segments. for _, prog := range f.Progs { @@ -145,45 +150,73 @@ func (ex *Executable) load(f *internal.SafeELFFile) error { // fn symbol offset = fn symbol VA - .text VA + .text offset // // stackoverflow.com/a/40249502 - off = s.Value - prog.Vaddr + prog.Off + address = s.Value - prog.Vaddr + prog.Off break } } - ex.offsets[s.Name] = off + ex.cachedAddresses[s.Name] = address } return nil } -func (ex *Executable) offset(symbol string) (uint64, error) { - if off, ok := ex.offsets[symbol]; ok { - // Symbols with location 0 from section undef are shared library calls and - // are relocated before the binary is executed. Dynamic linking is not - // implemented by the library, so mark this as unsupported for now. - // - // Since only offset values are stored and not elf.Symbol, if the value is 0, - // assume it's an external symbol. - if off == 0 { - return 0, fmt.Errorf("cannot resolve %s library call '%s', "+ - "consider providing the offset via options: %w", ex.path, symbol, ErrNotSupported) +// address calculates the address of a symbol in the executable. +// +// opts must not be nil. +func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) { + if address > 0 { + return address + offset, nil + } + + var err error + ex.cacheAddressesOnce.Do(func() { + var f *internal.SafeELFFile + f, err = internal.OpenSafeELFFile(ex.path) + if err != nil { + err = fmt.Errorf("parse ELF file: %w", err) + return } - return off, nil + defer f.Close() + + err = ex.load(f) + }) + if err != nil { + return 0, fmt.Errorf("lazy load symbols: %w", err) + } + + address, ok := ex.cachedAddresses[symbol] + if !ok { + return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + } + + // Symbols with location 0 from section undef are shared library calls and + // are relocated before the binary is executed. Dynamic linking is not + // implemented by the library, so mark this as unsupported for now. + // + // Since only offset values are stored and not elf.Symbol, if the value is 0, + // assume it's an external symbol. + if address == 0 { + return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ + "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) } - return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + + return address + offset, nil } // Uprobe attaches the given eBPF program to a perf event that fires when the // given symbol starts executing in the given Executable. // For example, /bin/bash::main(): // -// ex, _ = OpenExecutable("/bin/bash") -// ex.Uprobe("main", prog, nil) +// ex, _ = OpenExecutable("/bin/bash") +// ex.Uprobe("main", prog, nil) // // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. 
// // Losing the reference to the resulting Link (up) will close the Uprobe // and prevent further execution of prog. The Link must be Closed during @@ -191,31 +224,35 @@ func (ex *Executable) offset(symbol string) (uint64, error) { // // Functions provided by shared libraries can currently not be traced and // will result in an ErrNotSupported. +// +// The returned Link may implement [PerfEvent]. func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { u, err := ex.uprobe(symbol, prog, opts, false) if err != nil { return nil, err } - err = u.attach(prog) + lnk, err := attachPerfEvent(u, prog, opts.cookie()) if err != nil { u.Close() return nil, err } - return u, nil + return lnk, nil } // Uretprobe attaches the given eBPF program to a perf event that fires right // before the given symbol exits. For example, /bin/bash::main(): // -// ex, _ = OpenExecutable("/bin/bash") -// ex.Uretprobe("main", prog, nil) +// ex, _ = OpenExecutable("/bin/bash") +// ex.Uretprobe("main", prog, nil) // // When using symbols which belongs to shared libraries, // an offset must be provided via options: // -// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. // // Losing the reference to the resulting Link (up) will close the Uprobe // and prevent further execution of prog. The Link must be Closed during @@ -223,19 +260,21 @@ func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti // // Functions provided by shared libraries can currently not be traced and // will result in an ErrNotSupported. +// +// The returned Link may implement [PerfEvent]. func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { u, err := ex.uprobe(symbol, prog, opts, true) if err != nil { return nil, err } - err = u.attach(prog) + lnk, err := attachPerfEvent(u, prog, opts.cookie()) if err != nil { u.Close() return nil, err } - return u, nil + return lnk, nil } // uprobe opens a perf event for the given binary/symbol and attaches prog to it. @@ -251,13 +290,9 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti opts = &UprobeOptions{} } - offset := opts.Offset - if offset == 0 { - off, err := ex.offset(symbol) - if err != nil { - return nil, err - } - offset = off + offset, err := ex.address(symbol, opts.Address, opts.Offset) + if err != nil { + return nil, err } pid := opts.PID @@ -271,65 +306,32 @@ func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOpti } } - args := probeArgs{ - symbol: symbol, - path: ex.path, - offset: offset, - pid: pid, - refCtrOffset: opts.RefCtrOffset, - ret: ret, + args := tracefs.ProbeArgs{ + Type: tracefs.Uprobe, + Symbol: symbol, + Path: ex.path, + Offset: offset, + Pid: pid, + RefCtrOffset: opts.RefCtrOffset, + Ret: ret, + Cookie: opts.Cookie, + Group: opts.TraceFSPrefix, } // Use uprobe PMU if the kernel has it available. - tp, err := pmuUprobe(args) + tp, err := pmuProbe(args) if err == nil { return tp, nil } - if err != nil && !errors.Is(err, ErrNotSupported) { + if !errors.Is(err, ErrNotSupported) { return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err) } // Use tracefs if uprobe PMU is missing. 
- args.symbol = uprobeSanitizedSymbol(symbol) - tp, err = tracefsUprobe(args) + tp, err = tracefsProbe(args) if err != nil { return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) } return tp, nil } - -// pmuUprobe opens a perf event based on the uprobe PMU. -func pmuUprobe(args probeArgs) (*perfEvent, error) { - return pmuProbe(uprobeType, args) -} - -// tracefsUprobe creates a Uprobe tracefs entry. -func tracefsUprobe(args probeArgs) (*perfEvent, error) { - return tracefsProbe(uprobeType, args) -} - -// uprobeSanitizedSymbol replaces every invalid characted for the tracefs api with an underscore. -func uprobeSanitizedSymbol(symbol string) string { - return rgxUprobeSymbol.ReplaceAllString(symbol, "_") -} - -// uprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. -func uprobeToken(args probeArgs) string { - po := fmt.Sprintf("%s:%#x", args.path, args.offset) - - if args.refCtrOffset != 0 { - // This is not documented in Documentation/trace/uprobetracer.txt. - // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 - po += fmt.Sprintf("(%#x)", args.refCtrOffset) - } - - return po -} - -func uretprobeBit() (uint64, error) { - uprobeRetprobeBit.once.Do(func() { - uprobeRetprobeBit.value, uprobeRetprobeBit.err = determineRetprobeBit(uprobeType) - }) - return uprobeRetprobeBit.value, uprobeRetprobeBit.err -} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe_multi.go new file mode 100644 index 000000000..e34ad7168 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/uprobe_multi.go @@ -0,0 +1,220 @@ +//go:build !windows + +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// UprobeMultiOptions defines additional parameters that will be used +// when opening a UprobeMulti Link. +type UprobeMultiOptions struct { + // Symbol addresses. If set, overrides the addresses eventually parsed from + // the executable. Mutually exclusive with UprobeMulti's symbols argument. + Addresses []uint64 + + // Offsets into functions provided by UprobeMulti's symbols argument. + // For example: to set uprobes to main+5 and _start+10, call UprobeMulti + // with: + // symbols: "main", "_start" + // opt.Offsets: 5, 10 + Offsets []uint64 + + // Optional list of associated ref counter offsets. + RefCtrOffsets []uint64 + + // Optional list of associated BPF cookies. + Cookies []uint64 + + // Only set the uprobe_multi link on the given process ID, zero PID means + // system-wide. + PID uint32 +} + +func (ex *Executable) UprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + return ex.uprobeMulti(symbols, prog, opts, 0) +} + +func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + + // The return probe is not limited for symbols entry, so there's no special + // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets + // and opts.Addresses arrays follow the same logic as for entry uprobes. 
+ return ex.uprobeMulti(symbols, prog, opts, sys.BPF_F_UPROBE_MULTI_RETURN) +} + +func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + if opts == nil { + opts = &UprobeMultiOptions{} + } + + addresses, err := ex.addresses(symbols, opts.Addresses, opts.Offsets) + if err != nil { + return nil, err + } + + addrs := len(addresses) + cookies := len(opts.Cookies) + refCtrOffsets := len(opts.RefCtrOffsets) + + if addrs == 0 { + return nil, fmt.Errorf("field Addresses is required: %w", errInvalidInput) + } + if refCtrOffsets > 0 && refCtrOffsets != addrs { + return nil, fmt.Errorf("field RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput) + } + if cookies > 0 && cookies != addrs { + return nil, fmt.Errorf("field Cookies must be exactly Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateUprobeMultiAttr{ + Path: sys.NewStringPointer(ex.path), + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + UprobeMultiFlags: flags, + Count: uint32(addrs), + Offsets: sys.SlicePointer(addresses), + Pid: opts.PID, + } + + if refCtrOffsets != 0 { + attr.RefCtrOffsets = sys.SlicePointer(opts.RefCtrOffsets) + } + if cookies != 0 { + attr.Cookies = sys.SlicePointer(opts.Cookies) + } + + fd, err := sys.LinkCreateUprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist) + } + // Since Linux commit 46ba0e49b642 ("bpf: fix multi-uprobe PID filtering + // logic"), if the provided pid overflows MaxInt32 (turning it negative), the + // kernel will return EINVAL instead of ESRCH. + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (invalid pid, missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &uprobeMultiLink{RawLink{fd, ""}}, nil +} + +func (ex *Executable) addresses(symbols []string, addresses, offsets []uint64) ([]uint64, error) { + n := len(symbols) + if n == 0 { + n = len(addresses) + } + + if n == 0 { + return nil, fmt.Errorf("%w: neither symbols nor addresses given", errInvalidInput) + } + + if symbols != nil && len(symbols) != n { + return nil, fmt.Errorf("%w: have %d symbols but want %d", errInvalidInput, len(symbols), n) + } + + if addresses != nil && len(addresses) != n { + return nil, fmt.Errorf("%w: have %d addresses but want %d", errInvalidInput, len(addresses), n) + } + + if offsets != nil && len(offsets) != n { + return nil, fmt.Errorf("%w: have %d offsets but want %d", errInvalidInput, len(offsets), n) + } + + results := make([]uint64, 0, n) + for i := 0; i < n; i++ { + var sym string + if symbols != nil { + sym = symbols[i] + } + + var addr, off uint64 + if addresses != nil { + addr = addresses[i] + } + + if offsets != nil { + off = offsets[i] + } + + result, err := ex.address(sym, addr, off) + if err != nil { + return nil, err + } + + results = append(results, result) + } + + return results, nil +} + +type uprobeMultiLink struct { + RawLink +} + +var _ Link = (*uprobeMultiLink)(nil) + +func (kml *uprobeMultiLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported) +} + +var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", func() error { + prog, err := 
ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_upm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceUprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + // We try to create uprobe multi link on '/' path which results in + // error with -EBADF in case uprobe multi link is supported. + fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + Path: sys.NewStringPointer("/"), + Offsets: sys.SlicePointer([]uint64{0}), + Count: 1, + }) + switch { + case errors.Is(err, unix.EBADF): + return nil + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + case err != nil: + return err + } + + // should not happen + fd.Close() + return errors.New("successfully attached uprobe_multi to /, kernel bug?") +}, "6.6") diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/link/xdp.go b/src/nvcgo/vendor/github.com/cilium/ebpf/link/xdp.go index aa8dd3a4c..2daf0c4a2 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/link/xdp.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/link/xdp.go @@ -1,9 +1,12 @@ +//go:build !windows + package link import ( "fmt" "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" ) // XDPAttachFlags represents how XDP program will be attached to interface. @@ -50,5 +53,30 @@ func AttachXDP(opts XDPOptions) (Link, error) { Flags: uint32(opts.Flags), }) - return rawLink, err + if err != nil { + return nil, fmt.Errorf("failed to attach link: %w", err) + } + + return &xdpLink{*rawLink}, nil +} + +type xdpLink struct { + RawLink +} + +func (xdp *xdpLink) Info() (*Info, error) { + var info sys.XDPLinkInfo + if err := sys.ObjInfo(xdp.fd, &info); err != nil { + return nil, fmt.Errorf("xdp link info: %s", err) + } + extra := &XDPInfo{ + Ifindex: info.Ifindex, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/linker.go b/src/nvcgo/vendor/github.com/cilium/ebpf/linker.go index b056f99ae..98c4a0d0b 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/linker.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/linker.go @@ -1,14 +1,100 @@ package ebpf import ( - "bytes" + "debug/elf" "encoding/binary" + "errors" "fmt" + "io" + "io/fs" + "math" + "slices" + "strings" "github.com/cilium/ebpf/asm" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/platform" ) +// handles stores handle objects to avoid gc cleanup +type handles []*btf.Handle + +func (hs *handles) add(h *btf.Handle) (int, error) { + if h == nil { + return 0, nil + } + + if len(*hs) == math.MaxInt16 { + return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16) + } + + *hs = append(*hs, h) + + // return length of slice so that indexes start at 1 + return len(*hs), nil +} + +func (hs handles) fdArray() []int32 { + // first element of fda is reserved as no module can be indexed with 0 + fda := []int32{0} + for _, h := range hs { + fda = append(fda, int32(h.FD())) + } + + return fda +} + +func (hs *handles) Close() error { + var errs []error + for _, h := range *hs { + errs = append(errs, h.Close()) + } + return errors.Join(errs...) 
+} + +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. +// +// The resulting map is indexed by Symbol name. +func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } + + currentSym := insns[0].Symbol() + if currentSym == "" { + return nil, errors.New("insns must start with a Symbol") + } + + start := 0 + progs := make(map[string]asm.Instructions) + for i, ins := range insns[1:] { + i := i + 1 + + sym := ins.Symbol() + if sym == "" { + continue + } + + // New symbol, flush the old one out. + progs[currentSym] = slices.Clone(insns[start:i]) + + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) + } + currentSym = sym + start = i + } + + if tail := insns[start:]; len(tail) > 0 { + progs[currentSym] = slices.Clone(tail) + } + + return progs, nil +} + // The linker is responsible for resolving bpf-to-bpf calls between programs // within an ELF. Each BPF program must be a self-contained binary blob, // so when an instruction in one ELF program section wants to jump to @@ -23,170 +109,448 @@ import ( // Each function is denoted by an ELF symbol and the compiler takes care of // register setup before each jump instruction. -// populateReferences populates all of progs' Instructions and references -// with their full dependency chains including transient dependencies. -func populateReferences(progs map[string]*ProgramSpec) error { - type props struct { - insns asm.Instructions - refs map[string]*ProgramSpec +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. +func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true + } } + return false +} - out := make(map[string]props) +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// insns are modified in place. +func applyRelocations(insns asm.Instructions, bo binary.ByteOrder, b *btf.Builder, c *btf.Cache, kernelOverride *btf.Spec, extraTargets []*btf.Spec) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } - // Resolve and store direct references between all progs. - if err := findReferences(progs); err != nil { - return fmt.Errorf("finding references: %w", err) + if len(relos) == 0 { + return nil } - // Flatten all progs' instruction streams. - for name, prog := range progs { - insns, refs := prog.flatten(nil) + if bo == nil { + bo = internal.NativeEndian + } - prop := props{ - insns: insns, - refs: refs, + var targets []*btf.Spec + if kernelOverride == nil { + kernel, err := c.Kernel() + if err != nil { + return fmt.Errorf("load kernel spec: %w", err) } - out[name] = prop - } + modules, err := c.Modules() + // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES + // or CONFIG_DEBUG_INFO_BTF disabled. 
+ if err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } - // Replace all progs' instructions and references - for name, props := range out { - progs[name].Instructions = props.insns - progs[name].references = props.refs + targets = make([]*btf.Spec, 0, 1+len(modules)+len(extraTargets)) + targets = append(targets, kernel) + + for _, kmod := range modules { + spec, err := c.Module(kmod) + if err != nil { + return fmt.Errorf("load BTF for kmod %s: %w", kmod, err) + } + + targets = append(targets, spec) + } + } else { + // We expect kernelOverride to contain the merged types + // of vmlinux and kernel modules, as distributed by btfhub. + targets = []*btf.Spec{kernelOverride} } - return nil -} + targets = append(targets, extraTargets...) -// findReferences finds bpf-to-bpf calls between progs and populates each -// prog's references field with its direct neighbours. -func findReferences(progs map[string]*ProgramSpec) error { - // Check all ProgramSpecs in the collection against each other. - for _, prog := range progs { - prog.references = make(map[string]*ProgramSpec) - - // Look up call targets in progs and store pointers to their corresponding - // ProgramSpecs as direct references. - for refname := range prog.Instructions.FunctionReferences() { - ref := progs[refname] - // Call targets are allowed to be missing from an ELF. This occurs when - // a program calls into a forward function declaration that is left - // unimplemented. This is caught at load time during fixups. - if ref != nil { - prog.references[refname] = ref - } + fixups, err := btf.CORERelocate(relos, targets, bo, b.Add) + if err != nil { + return err + } + + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("fixup for %s: %w", relos[i], err) } } return nil } -// marshalFuncInfos returns the BTF func infos of all progs in order. -func marshalFuncInfos(layout []reference) ([]byte, error) { - if len(layout) == 0 { - return nil, nil +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. + refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() } - buf := bytes.NewBuffer(make([]byte, 0, binary.Size(&btf.FuncInfo{})*len(layout))) - for _, sym := range layout { - if err := sym.spec.BTF.FuncInfo.Marshal(buf, sym.offset); err != nil { - return nil, fmt.Errorf("marshaling prog %s func info: %w", sym.spec.Name, err) - } + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) } - return buf.Bytes(), nil + // Finally, assign the flattened instructions. + for i, name := range names { + progs[name].Instructions = flattened[i] + } } -// marshalLineInfos returns the BTF line infos of all progs in order. -func marshalLineInfos(layout []reference) ([]byte, error) { - if len(layout) == 0 { - return nil, nil +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. 
+// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] + progRefs := refs[prog] + + if len(progRefs) == 0 { + // No references, nothing to do. + return prog.Instructions } - buf := bytes.NewBuffer(make([]byte, 0, binary.Size(&btf.LineInfo{})*len(layout))) - for _, sym := range layout { - if err := sym.spec.BTF.LineInfos.Marshal(buf, sym.offset); err != nil { - return nil, fmt.Errorf("marshaling prog %s line infos: %w", sym.spec.Name, err) + insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. + pending := make([]string, len(progRefs)) + copy(pending, progRefs) + + // All references for which we've appended instructions. + linked := make(map[string]bool) + + // Iterate all pending references. We can't use a range since pending is + // modified in the body below. + for len(pending) > 0 { + var ref string + ref, pending = pending[0], pending[1:] + + if linked[ref] { + // We've already linked this ref, don't append instructions again. + continue + } + + progRef := progs[ref] + if progRef == nil { + // We don't have instructions that go with this reference. This + // happens when calling extern functions. + continue } + + insns = append(insns, progRef.Instructions...) + linked[ref] = true + + // Make sure we link indirect references. + pending = append(pending, refs[progRef]...) } - return buf.Bytes(), nil + return insns } -func fixupJumpsAndCalls(insns asm.Instructions) error { - symbolOffsets := make(map[string]asm.RawInstructionOffset) +// fixupAndValidate is called by the ELF reader right before marshaling the +// instruction stream. It performs last-minute adjustments to the program and +// runs some sanity checks before sending it off to the kernel. +func fixupAndValidate(insns asm.Instructions) error { iter := insns.Iterate() for iter.Next() { ins := iter.Ins - if ins.Symbol == "" { - continue + // Map load was tagged with a Reference, but does not contain a Map pointer. + needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil + if ins.IsLoadFromMap() && needsMap && ins.Map() == nil { + return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference) } - if _, ok := symbolOffsets[ins.Symbol]; ok { - return fmt.Errorf("duplicate symbol %s", ins.Symbol) - } + fixupProbeReadKernel(ins) + } - symbolOffsets[ins.Symbol] = iter.Offset + return nil +} + +// A constant used to poison calls to non-existent kfuncs. +// +// Similar POISON_CALL_KFUNC_BASE in libbpf, except that we use a value lower +// than 2^28 to fit into a tagged constant. +const kfuncCallPoisonBase = 0xdedc0de + +// fixupKfuncs loops over all instructions in search for kfunc calls. +// If at least one is found, the current kernels BTF and module BTFis are searched to set Instruction.Constant +// and Instruction.Offset to the correct values. 
+func fixupKfuncs(insns asm.Instructions, cache *btf.Cache) (_ handles, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } } - iter = insns.Iterate() + iter := insns.Iterate() for iter.Next() { - i := iter.Index - offset := iter.Offset ins := iter.Ins - - if ins.Reference == "" { - continue + if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil { + goto fixups } + } - symOffset, ok := symbolOffsets[ins.Reference] - switch { - case ins.IsFunctionReference() && ins.Constant == -1: - if !ok { + return nil, nil + +fixups: + // Only load kernel BTF if we found at least one kfunc call. kernelSpec can be + // nil if the kernel does not have BTF, in which case we poison all kfunc + // calls. + _, err = cache.Kernel() + // ErrNotSupportedOnOS wraps ErrNotSupported, check for it first. + if errors.Is(err, internal.ErrNotSupportedOnOS) { + return nil, fmt.Errorf("kfuncs are not supported on this platform: %w", err) + } + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, err + } + + fdArray := make(handles, 0) + defer closeOnError(&fdArray) + + for { + ins := iter.Ins + + metadata := ins.Metadata.Get(kfuncMetaKey{}) + if metadata == nil { + if !iter.Next() { + // break loop if this was the last instruction in the stream. break } - - ins.Constant = int64(symOffset - offset - 1) continue + } - case ins.OpCode.Class().IsJump() && ins.Offset == -1: - if !ok { - break + // check meta, if no meta return err + kfm, _ := metadata.(*kfuncMeta) + if kfm == nil { + return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta") + } + + // findTargetInKernel returns btf.ErrNotFound if the input btf.Spec is nil. + target := btf.Type((*btf.Func)(nil)) + spec, module, err := findTargetInKernel(kfm.Func.Name, &target, cache) + if errors.Is(err, btf.ErrNotFound) { + if kfm.Binding == elf.STB_WEAK { + if ins.IsKfuncCall() { + // If the kfunc call is weak and not found, poison the call. Use a + // recognizable constant to make it easier to debug. + fn, err := asm.BuiltinFuncForPlatform(platform.Native, kfuncCallPoisonBase) + if err != nil { + return nil, err + } + *ins = fn.Call() + } else if ins.OpCode.IsDWordLoad() { + // If the kfunc DWordLoad is weak and not found, set its address to 0. + ins.Constant = 0 + ins.Src = 0 + } else { + return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + } + + iter.Next() + continue } - ins.Offset = int16(symOffset - offset - 1) - continue + // Error on non-weak kfunc not found. 
+ return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("finding kfunc in kernel: %w", err) + } - case ins.IsLoadFromMap() && ins.MapPtr() == -1: - return fmt.Errorf("map %s: %w", ins.Reference, errUnsatisfiedMap) - default: - // no fixup needed - continue + idx, err := fdArray.add(module) + if err != nil { + return nil, err + } + + if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil { + return nil, &incompatibleKfuncError{kfm.Func.Name, err} + } + + id, err := spec.TypeID(target) + if err != nil { + return nil, err + } + + ins.Constant = int64(id) + ins.Offset = int16(idx) + + if !iter.Next() { + break } + } + + return fdArray, nil +} + +type incompatibleKfuncError struct { + name string + err error +} + +func (ike *incompatibleKfuncError) Error() string { + return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err) +} + +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. +func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } - return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference, errUnsatisfiedProgram) + // Kernel supports bpf_probe_read_kernel, nothing to do. + if haveProbeReadKernel() == nil { + return } - // fixupBPFCalls replaces bpf_probe_read_{kernel,user}[_str] with bpf_probe_read[_str] on older kernels - // https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L6009 + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) + } +} + +// resolveKconfigReferences creates and populates a .kconfig map if necessary. +// +// Returns a nil Map and no error if no references exist. +func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + var spec *MapSpec + iter := insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta != nil { + spec = meta.Map + break + } + } + + if spec == nil { + return nil, nil + } + + cpy := spec.Copy() + if err := resolveKconfig(cpy); err != nil { + return nil, err + } + + kconfig, err := NewMap(cpy) + if err != nil { + return nil, err + } + defer closeOnError(kconfig) + + // Resolve all instructions which load from .kconfig map with actual map + // and offset inside it. iter = insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta == nil { + continue + } + + if meta.Map != spec { + return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index) + } + + if err := iter.Ins.AssociateMap(kconfig); err != nil { + return nil, fmt.Errorf("instruction %d: %w", iter.Index, err) + } + + // Encode a map read at the offset of the var in the datasec. 
+ iter.Ins.Constant = int64(uint64(meta.Offset) << 32) + iter.Ins.Metadata.Set(kconfigMetaKey{}, nil) + } + + return kconfig, nil +} + +func resolveKsymReferences(insns asm.Instructions) error { + type fixup struct { + *asm.Instruction + *ksymMeta + } + + var symbols map[string]uint64 + var fixups []fixup + + iter := insns.Iterate() for iter.Next() { ins := iter.Ins - if !ins.IsBuiltinCall() { + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta == nil { continue } - switch asm.BuiltinFunc(ins.Constant) { - case asm.FnProbeReadKernel, asm.FnProbeReadUser: - if err := haveProbeReadKernel(); err != nil { - ins.Constant = int64(asm.FnProbeRead) - } - case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: - if err := haveProbeReadKernel(); err != nil { - ins.Constant = int64(asm.FnProbeReadStr) + + if symbols == nil { + symbols = make(map[string]uint64) + } + + symbols[meta.Name] = 0 + fixups = append(fixups, fixup{ + iter.Ins, meta, + }) + } + + if len(symbols) == 0 { + return nil + } + + err := kallsyms.AssignAddresses(symbols) + // Tolerate ErrRestrictedKernel during initial lookup, user may have all weak + // ksyms and a fallback path. + if err != nil && !errors.Is(err, ErrRestrictedKernel) { + return fmt.Errorf("resolve ksyms: %w", err) + } + + var missing []string + for _, fixup := range fixups { + addr := symbols[fixup.Name] + // A weak ksym variable in eBPF C means its resolution is optional. + if addr == 0 && fixup.Binding != elf.STB_WEAK { + if !slices.Contains(missing, fixup.Name) { + missing = append(missing, fixup.Name) } + continue } + + fixup.Constant = int64(addr) + } + + if len(missing) > 0 { + if err != nil { + // Program contains required ksyms, return the error from above. + return fmt.Errorf("resolve required ksyms: %s: %w", strings.Join(missing, ","), err) + } + + return fmt.Errorf("kernel is missing symbol: %s", strings.Join(missing, ",")) } return nil diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/map.go b/src/nvcgo/vendor/github.com/cilium/ebpf/map.go index b49b40187..f9499272b 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/map.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/map.go @@ -6,25 +6,34 @@ import ( "fmt" "io" "math/rand" + "os" "path/filepath" "reflect" + "slices" "strings" + "sync" "time" "unsafe" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" "github.com/cilium/ebpf/internal/unix" ) // Errors returned by Map and MapIterator methods. var ( - errFirstKeyNotFound = errors.New("first key not found") ErrKeyNotExist = errors.New("key does not exist") ErrKeyExist = errors.New("key already exists") ErrIterationAborted = errors.New("iteration aborted") - ErrMapIncompatible = errors.New("map's spec is incompatible with pinned map") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") + + // pre-allocating these errors here since they may get called in hot code paths + // and cause unnecessary memory allocations + errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist) ) // MapOptions control loading a map into the kernel. @@ -37,12 +46,13 @@ type MapOptions struct { } // MapID represents the unique ID of an eBPF map -type MapID uint32 +type MapID = sys.MapID // MapSpec defines a Map. 
type MapSpec struct { - // Name is passed to the kernel as a debug aid. Must only contain - // alpha numeric and '_' characters. + // Name is passed to the kernel as a debug aid. + // + // Unsupported characters will be stripped. Name string Type MapType KeySize uint32 @@ -58,26 +68,33 @@ type MapSpec struct { Pinning PinType // Specify numa node during map creation - // (effective only if unix.BPF_F_NUMA_NODE flag is set, + // (effective only if sys.BPF_F_NUMA_NODE flag is set, // which can be imported from golang.org/x/sys/unix) NumaNode uint32 // The initial contents of the map. May be nil. Contents []MapKV - // Whether to freeze a map after setting its initial contents. - Freeze bool - // InnerMap is used as a template for ArrayOfMaps and HashOfMaps InnerMap *MapSpec + // MapExtra is an opaque field whose meaning is map-specific. + // + // Available from 5.16. + MapExtra uint64 + // Extra trailing bytes found in the ELF map definition when using structs // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. // Must be nil or empty before instantiating the MapSpec into a Map. Extra *bytes.Reader - // The BTF associated with this map. - BTF *btf.Map + // The key and value type of this map. May be nil. + Key, Value btf.Type + + // Tags is a list of btf_decl_tag attributes set on the map definition. + // + // Decorate a map definition with `__attribute__((btf_decl_tag("foo")))`. + Tags []string } func (ms *MapSpec) String() string { @@ -93,36 +110,113 @@ func (ms *MapSpec) Copy() *MapSpec { } cpy := *ms + cpy.Contents = slices.Clone(cpy.Contents) + cpy.Key = btf.Copy(cpy.Key) + cpy.Value = btf.Copy(cpy.Value) + cpy.Tags = slices.Clone(cpy.Tags) - cpy.Contents = make([]MapKV, len(ms.Contents)) - copy(cpy.Contents, ms.Contents) + if cpy.InnerMap == ms { + cpy.InnerMap = &cpy + } else { + cpy.InnerMap = ms.InnerMap.Copy() + } - cpy.InnerMap = ms.InnerMap.Copy() + if cpy.Extra != nil { + extra := *cpy.Extra + cpy.Extra = &extra + } return &cpy } -// hasBTF returns true if the MapSpec has a valid BTF spec and if its -// map type supports associated BTF metadata in the kernel. -func (ms *MapSpec) hasBTF() bool { - return ms.BTF != nil && ms.Type.hasBTF() +// fixupMagicFields fills fields of MapSpec which are usually +// left empty in ELF or which depend on runtime information. +// +// The method doesn't modify Spec, instead returning a copy. +// The copy is only performed if fixups are necessary, so callers mustn't mutate +// the returned spec. +func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { + switch { + case spec.Type.canStoreMap(): + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for map of map") + } + + spec = spec.Copy() + spec.ValueSize = 4 + + case spec.Type == PerfEventArray: + if spec.KeySize != 0 && spec.KeySize != 4 { + return nil, errors.New("KeySize must be zero or four for perf event array") + } + + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for perf event array") + } + + spec = spec.Copy() + spec.KeySize = 4 + spec.ValueSize = 4 + + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup perf event array: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // MaxEntries should be zero most of the time, but there is code + // out there which hardcodes large constants. Clamp the number + // of entries to the number of CPUs at most. 
Allow creating maps with + // less than n items since some kernel selftests relied on this + // behaviour in the past. + spec.MaxEntries = n + } + + case spec.Type == CPUMap: + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup cpu map: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // Perform clamping similar to PerfEventArray. + spec.MaxEntries = n + } + } + + return spec, nil } -func (ms *MapSpec) clampPerfEventArraySize() error { - if ms.Type != PerfEventArray { - return nil +// dataSection returns the contents and BTF Datasec descriptor of the spec. +func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + if ms.Value == nil { + return nil, nil, errMapNoBTFValue } - n, err := internal.PossibleCPUs() - if err != nil { - return fmt.Errorf("perf event array: %w", err) + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) } - if n := uint32(n); ms.MaxEntries > n { - ms.MaxEntries = n + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) } - return nil + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + +func (ms *MapSpec) readOnly() bool { + return (ms.Flags & sys.BPF_F_RDONLY_PROG) > 0 +} + +func (ms *MapSpec) writeOnly() bool { + return (ms.Flags & sys.BPF_F_WRONLY_PROG) > 0 } // MapKV is used to initialize the contents of a Map. @@ -131,24 +225,47 @@ type MapKV struct { Value interface{} } -func (ms *MapSpec) checkCompatibility(m *Map) error { - switch { - case m.typ != ms.Type: - return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible) +// Compatible returns nil if an existing map may be used instead of creating +// one from the spec. +// +// Returns an error wrapping [ErrMapIncompatible] otherwise. +func (ms *MapSpec) Compatible(m *Map) error { + ms, err := ms.fixupMagicFields() + if err != nil { + return err + } - case m.keySize != ms.KeySize: - return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible) + diffs := []string{} + if m.typ != ms.Type { + diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type)) + } + if m.keySize != ms.KeySize { + diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize)) + } + if m.valueSize != ms.ValueSize { + diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize)) + } + if m.maxEntries != ms.MaxEntries { + diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) + } - case m.valueSize != ms.ValueSize: - return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible) + flags := ms.Flags + if ms.Type == DevMap || ms.Type == DevMapHash { + // As of 0cdbb4b09a06 ("devmap: Allow map lookups from eBPF") + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly + // allow this mismatch. 
+ flags |= (m.flags & sys.BPF_F_RDONLY_PROG) + } - case m.maxEntries != ms.MaxEntries: - return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible) + if m.flags != flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, flags)) + } - case m.flags != ms.Flags: - return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible) + if len(diffs) == 0 { + return nil } - return nil + + return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible) } // Map represents a Map file descriptor. @@ -171,11 +288,15 @@ type Map struct { pinnedPath string // Per CPU maps return values larger than the size in the spec fullValueSize int + + memory *Memory } -// NewMapFromFD creates a map from a raw fd. +// NewMapFromFD creates a [Map] around a raw fd. // // You should not use fd after calling this function. +// +// Requires at least Linux 4.13. func NewMapFromFD(fd int) (*Map, error) { f, err := sys.NewFD(fd) if err != nil { @@ -186,13 +307,13 @@ func NewMapFromFD(fd int) (*Map, error) { } func newMapFromFD(fd *sys.FD) (*Map, error) { - info, err := newMapInfoFromFd(fd) + info, err := minimalMapInfoFromFd(fd) if err != nil { fd.Close() return nil, fmt.Errorf("get map info: %w", err) } - return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) + return newMapFromParts(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) } // NewMap creates a new Map. @@ -213,23 +334,20 @@ func NewMap(spec *MapSpec) (*Map, error) { // // May return an error wrapping ErrMapIncompatible. func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { - handles := newHandleCache() - defer handles.close() - - m, err := newMapWithOptions(spec, opts, handles) + m, err := newMapWithOptions(spec, opts, btf.NewCache()) if err != nil { return nil, fmt.Errorf("creating map: %w", err) } - err = m.finalize(spec) - if err != nil { + if err := m.finalize(spec); err != nil { + m.Close() return nil, fmt.Errorf("populating map: %w", err) } return m, nil } -func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ *Map, err error) { +func newMapWithOptions(spec *MapSpec, opts MapOptions, c *btf.Cache) (_ *Map, err error) { closeOnError := func(c io.Closer) { if err != nil { c.Close() @@ -256,7 +374,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ } defer closeOnError(m) - if err := spec.checkCompatibility(m); err != nil { + if err := spec.Compatible(m); err != nil { return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) } @@ -270,7 +388,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ } var innerFd *sys.FD - if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { + if spec.Type.canStoreMap() { if spec.InnerMap == nil { return nil, fmt.Errorf("%s requires InnerMap", spec.Type) } @@ -279,7 +397,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ return nil, errors.New("inner maps cannot be pinned") } - template, err := spec.InnerMap.createMap(nil, opts, handles) + template, err := spec.InnerMap.createMap(nil, c) if err != nil { return nil, fmt.Errorf("inner map: %w", err) } @@ -291,7 +409,7 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ innerFd = template.fd } - m, err := spec.createMap(innerFd, opts, handles) + m, err := spec.createMap(innerFd, c) if err != nil { return nil, err } @@ 
-300,24 +418,99 @@ func newMapWithOptions(spec *MapSpec, opts MapOptions, handles *handleCache) (_ if spec.Pinning == PinByName { path := filepath.Join(opts.PinPath, spec.Name) if err := m.Pin(path); err != nil { - return nil, fmt.Errorf("pin map: %w", err) + return nil, fmt.Errorf("pin map to %s: %w", path, err) } } return m, nil } +// Memory returns a memory-mapped region for the Map. The Map must have been +// created with the BPF_F_MMAPABLE flag. Repeated calls to Memory return the +// same mapping. Callers are responsible for coordinating access to Memory. +func (m *Map) Memory() (*Memory, error) { + if m.memory != nil { + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +// unsafeMemory returns a heap-mapped memory region for the Map. The Map must +// have been created with the BPF_F_MMAPABLE flag. Repeated calls to Memory +// return the same mapping. Callers are responsible for coordinating access to +// Memory. +func (m *Map) unsafeMemory() (*Memory, error) { + if m.memory != nil { + if !m.memory.heap { + return nil, errors.New("unsafeMemory would return existing non-heap memory") + } + + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newUnsafeMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +func (m *Map) memorySize() (int, error) { + switch m.Type() { + case Array: + // In Arrays, values are always laid out on 8-byte boundaries regardless of + // architecture. Multiply by MaxEntries and align the result to the host's + // page size. + size := int(internal.Align(m.ValueSize(), 8) * m.MaxEntries()) + size = internal.Align(size, os.Getpagesize()) + return size, nil + case Arena: + // For Arenas, MaxEntries denotes the maximum number of pages available to + // the arena. + return int(m.MaxEntries()) * os.Getpagesize(), nil + } + + return 0, fmt.Errorf("determine memory size of map type %s: %w", m.Type(), ErrNotSupported) +} + // createMap validates the spec's properties and creates the map in the kernel // using the given opts. It does not populate or freeze the map. -func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCache) (_ *Map, err error) { +func (spec *MapSpec) createMap(inner *sys.FD, c *btf.Cache) (_ *Map, err error) { closeOnError := func(closer io.Closer) { if err != nil { closer.Close() } } - spec = spec.Copy() - // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained // additional 'inner_map_idx' and later 'numa_node' fields. 
// In order to support loading these definitions, tolerate the presence of @@ -328,111 +521,172 @@ func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions, handles *handleCa } } - switch spec.Type { - case ArrayOfMaps, HashOfMaps: - if err := haveNestedMaps(); err != nil { - return nil, err - } - - if spec.ValueSize != 0 && spec.ValueSize != 4 { - return nil, errors.New("ValueSize must be zero or four for map of map") - } - spec.ValueSize = 4 - - case PerfEventArray: - if spec.KeySize != 0 && spec.KeySize != 4 { - return nil, errors.New("KeySize must be zero or four for perf event array") - } - spec.KeySize = 4 - - if spec.ValueSize != 0 && spec.ValueSize != 4 { - return nil, errors.New("ValueSize must be zero or four for perf event array") - } - spec.ValueSize = 4 - - if spec.MaxEntries == 0 { - n, err := internal.PossibleCPUs() - if err != nil { - return nil, fmt.Errorf("perf event array: %w", err) - } - spec.MaxEntries = uint32(n) - } + spec, err = spec.fixupMagicFields() + if err != nil { + return nil, err } - if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze { - if err := haveMapMutabilityModifiers(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - if spec.Flags&unix.BPF_F_MMAPABLE > 0 { - if err := haveMmapableMaps(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - if spec.Flags&unix.BPF_F_INNER_MAP > 0 { - if err := haveInnerMaps(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } - } - if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 { - if err := haveNoPreallocMaps(); err != nil { - return nil, fmt.Errorf("map create: %w", err) - } + p, sysMapType := platform.DecodeConstant(spec.Type) + if p != platform.Native { + return nil, fmt.Errorf("map type %s (%s): %w", spec.Type, p, internal.ErrNotSupportedOnOS) } attr := sys.MapCreateAttr{ - MapType: sys.MapType(spec.Type), + MapName: maybeFillObjName(spec.Name), + MapType: sys.MapType(sysMapType), KeySize: spec.KeySize, ValueSize: spec.ValueSize, MaxEntries: spec.MaxEntries, MapFlags: spec.Flags, NumaNode: spec.NumaNode, + MapExtra: spec.MapExtra, } if inner != nil { attr.InnerMapFd = inner.Uint() } - if haveObjName() == nil { - attr.MapName = sys.NewObjName(spec.Name) - } - - if spec.hasBTF() { - handle, err := handles.btfHandle(spec.BTF.Spec) + if spec.Key != nil || spec.Value != nil { + handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value) if err != nil && !errors.Is(err, btf.ErrNotSupported) { return nil, fmt.Errorf("load BTF: %w", err) } if handle != nil { + defer handle.Close() + + // Use BTF k/v during map creation. 
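A hedged sketch of how a caller can populate the Key/Value fields consumed here on a hand-built spec; the btf.Int shapes and names are illustrative, and specs loaded from ELF normally carry this BTF already. On kernels without BTF support the retry below drops these attributes, so the same spec stays portable. Assumed imports: `github.com/cilium/ebpf` and `github.com/cilium/ebpf/btf`.

```go
spec := &ebpf.MapSpec{
	Name:       "example_hash",
	Type:       ebpf.Hash,
	KeySize:    4,
	ValueSize:  8,
	MaxEntries: 1024,
	// Optional BTF describing the key and value types.
	Key:   &btf.Int{Name: "u32", Size: 4, Encoding: btf.Unsigned},
	Value: &btf.Int{Name: "u64", Size: 8, Encoding: btf.Unsigned},
}

m, err := ebpf.NewMap(spec)
if err != nil {
	// handle error
}
defer m.Close()
```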
attr.BtfFd = uint32(handle.FD()) - attr.BtfKeyTypeId = uint32(spec.BTF.Key.ID()) - attr.BtfValueTypeId = uint32(spec.BTF.Value.ID()) + attr.BtfKeyTypeId = keyTypeID + attr.BtfValueTypeId = valueTypeID + } + + if spec.Type == StructOpsMap { + if handle == nil { + return nil, fmt.Errorf("struct_ops requires BTF") + } + + localValue, ok := btf.As[*btf.Struct](spec.Value) + if !ok { + return nil, fmt.Errorf("struct_ops: value must be struct") + } + + targetValue, targetID, module, err := structOpsFindTarget(localValue, c) + if err != nil { + return nil, fmt.Errorf("struct_ops: %w", err) + } + defer module.Close() + + spec = spec.Copy() + spec.ValueSize = targetValue.Size + + attr.ValueSize = targetValue.Size + attr.BtfVmlinuxValueTypeId = targetID + + if module != nil { + // BPF_F_VTYPE_BTF_OBJ_FD is required if the type comes from a module + attr.MapFlags |= sys.BPF_F_VTYPE_BTF_OBJ_FD + // set FD for the kernel module + attr.ValueTypeBtfObjFd = int32(module.FD()) + } + + // StructOpsMap forbids passing BtfKeyTypeId or BtfValueTypeId, but + // requires BtfFd. Do the simple thing and just zero out the fields. + // See https://github.com/torvalds/linux/blob/9b332cece987ee1790b2ed4c989e28162fa47860/kernel/bpf/syscall.c#L1382-L1384 + attr.BtfKeyTypeId = 0 + attr.BtfValueTypeId = 0 } } fd, err := sys.MapCreate(&attr) + + // Some map types don't support BTF k/v in earlier kernel versions. + // Remove BTF metadata and retry map creation. + if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 { + attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0 + fd, err = sys.MapCreate(&attr) + } if err != nil { - if errors.Is(err, unix.EPERM) { - return nil, fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) - } - if !spec.hasBTF() { - return nil, fmt.Errorf("map create without BTF: %w", err) - } - return nil, fmt.Errorf("map create: %w", err) + return nil, handleMapCreateError(attr, spec, err) } - defer closeOnError(fd) - m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) + defer closeOnError(fd) + m, err := newMapFromParts(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) if err != nil { return nil, fmt.Errorf("map create: %w", err) } - return m, nil } -// newMap allocates and returns a new Map structure. 
+func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error { + if platform.IsWindows { + if errors.Is(err, unix.EINVAL) && attr.MapFlags != 0 { + return fmt.Errorf("map create: flags: %w", internal.ErrNotSupportedOnOS) + } + + return err + } + + if errors.Is(err, unix.EPERM) { + return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + if errors.Is(err, unix.EINVAL) { + if spec.MaxEntries == 0 { + return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } + if spec.Type == UnspecifiedMap { + return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) + } + if spec.Flags&sys.BPF_F_NO_PREALLOC != 0 && !spec.Type.mustHaveNoPrealloc() { + return fmt.Errorf("map create: %w (BPF_F_NO_PREALLOC flag may be incompatible with map type %s)", err, spec.Type) + } + if spec.Flags&sys.BPF_F_NO_PREALLOC == 0 && spec.Type.mustHaveNoPrealloc() { + return fmt.Errorf("map create: %w (BPF_F_NO_PREALLOC flag may need to be set for map type %s)", err, spec.Type) + } + } + + if spec.Type.canStoreMap() { + if haveFeatErr := haveNestedMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + + if spec.readOnly() || spec.writeOnly() { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_MMAPABLE > 0 { + if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_INNER_MAP > 0 { + if haveFeatErr := haveInnerMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_NO_PREALLOC > 0 { + if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + // BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size. + if errors.Is(err, unix.EINVAL) && + (attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) { + pageSize := uint32(os.Getpagesize()) + maxEntries := attr.MaxEntries + if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) { + return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize) + } + } + + return fmt.Errorf("map create: %w", err) +} + +// newMapFromParts allocates and returns a new Map structure. // Sets the fullValueSize on per-CPU maps. -func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { +func newMapFromParts(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { m := &Map{ name, fd, @@ -443,18 +697,19 @@ func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries flags, "", int(valueSize), + nil, } if !typ.hasPerCPUValue() { return m, nil } - possibleCPUs, err := internal.PossibleCPUs() + possibleCPUs, err := PossibleCPU() if err != nil { return nil, err } - m.fullValueSize = internal.Align(int(valueSize), 8) * possibleCPUs + m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs return m, nil } @@ -490,16 +745,39 @@ func (m *Map) Flags() uint32 { return m.flags } -// Info returns metadata about the map. +// Info returns metadata about the map. This was first introduced in Linux 4.5, +// but newer kernels support more MapInfo fields with the introduction of more +// features. 
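The ring-buffer branch of this error handler is easier to read with a valid spec in mind. A sketch of a MaxEntries value that satisfies the power-of-two, multiple-of-page-size rule; the 16-page figure is arbitrary and `os` is an assumed import.

```go
func newRingBuf() (*ebpf.Map, error) {
	// Page sizes are powers of two, so a power-of-two page count keeps
	// MaxEntries both page-aligned and a power of two.
	size := uint32(os.Getpagesize()) * 16 // 64 KiB with 4 KiB pages

	return ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.RingBuf,
		MaxEntries: size,
		// A value such as 10000 would instead trip the EINVAL diagnostic
		// above ("ring map size ... not a multiple of page size").
	})
}
```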
See [MapInfo] and its methods for more details. +// +// Returns an error wrapping [ErrNotSupported] if the kernel supports neither +// BPF_OBJ_GET_INFO_BY_FD nor reading map information from /proc/self/fdinfo. func (m *Map) Info() (*MapInfo, error) { return newMapInfoFromFd(m.fd) } +// Handle returns a reference to the Map's type information in the kernel. +// +// Returns [ErrNotSupported] if the kernel has no BTF support, or if there is no +// BTF associated with the Map. +func (m *Map) Handle() (*btf.Handle, error) { + info, err := m.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + // MapLookupFlags controls the behaviour of the map lookup calls. type MapLookupFlags uint64 // LookupLock look up the value of a spin-locked map. -const LookupLock MapLookupFlags = 4 +const LookupLock MapLookupFlags = sys.BPF_F_LOCK // Lookup retrieves a value from a Map. // @@ -508,12 +786,7 @@ const LookupLock MapLookupFlags = 4 // // Returns an error if the key doesn't exist, see ErrKeyNotExist. func (m *Map) Lookup(key, valueOut interface{}) error { - valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - if err := m.lookup(key, valuePtr, 0); err != nil { - return err - } - - return m.unmarshalValue(valueOut, valueBytes) + return m.LookupWithFlags(key, valueOut, 0) } // LookupWithFlags retrieves a value from a Map with flags. @@ -527,8 +800,12 @@ func (m *Map) Lookup(key, valueOut interface{}) error { // // Returns an error if the key doesn't exist, see ErrKeyNotExist. func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { - valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) - if err := m.lookup(key, valuePtr, flags); err != nil { + if m.typ.hasPerCPUValue() { + return m.lookupPerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil { return err } @@ -539,7 +816,7 @@ func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) e // // Returns ErrKeyNotExist if the key doesn't exist. func (m *Map) LookupAndDelete(key, valueOut interface{}) error { - return m.lookupAndDelete(key, valueOut, 0) + return m.LookupAndDeleteWithFlags(key, valueOut, 0) } // LookupAndDeleteWithFlags retrieves and deletes a value from a Map. @@ -550,7 +827,15 @@ func (m *Map) LookupAndDelete(key, valueOut interface{}) error { // // Returns ErrKeyNotExist if the key doesn't exist. func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { - return m.lookupAndDelete(key, valueOut, flags) + if m.typ.hasPerCPUValue() { + return m.lookupAndDeletePerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil { + return err + } + return m.unmarshalValue(valueOut, valueBytes) } // LookupBytes gets a value from Map. @@ -558,7 +843,7 @@ func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLooku // Returns a nil value if a key doesn't exist. 
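Since per-CPU map types are now routed through lookupPerCPU, a caller passes a slice (or a pointer to one) and receives one element per possible CPU. A sketch assuming a PerCPUArray or PerCPUHash with 8-byte values:

```go
func sumPerCPU(m *ebpf.Map, key uint32) (uint64, error) {
	var perCPU []uint64 // sized to PossibleCPU() entries by Lookup
	if err := m.Lookup(key, &perCPU); err != nil {
		return 0, err
	}
	var total uint64
	for _, v := range perCPU {
		total += v
	}
	return total, nil
}
```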
func (m *Map) LookupBytes(key interface{}) ([]byte, error) { valueBytes := make([]byte, m.fullValueSize) - valuePtr := sys.NewSlicePointer(valueBytes) + valuePtr := sys.UnsafeSlicePointer(valueBytes) err := m.lookup(key, valuePtr, 0) if errors.Is(err, ErrKeyNotExist) { @@ -568,6 +853,18 @@ func (m *Map) LookupBytes(key interface{}) ([]byte, error) { return valueBytes, err } +func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookup(key, sys.UnsafeSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { keyPtr, err := m.marshalKey(key) if err != nil { @@ -582,14 +879,65 @@ func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags } if err = sys.MapLookupElem(&attr); err != nil { + if errors.Is(err, unix.ENOENT) { + return errMapLookupKeyNotExist + } return fmt.Errorf("lookup: %w", wrapMapError(err)) } return nil } -func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) error { - valuePtr, valueBytes := makeBuffer(valueOut, m.fullValueSize) +func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookupAndDelete(key, sys.UnsafeSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary. +func ensurePerCPUSlice(sliceOrPtr any) (any, error) { + sliceOrPtrType := reflect.TypeOf(sliceOrPtr) + if sliceOrPtrType.Kind() == reflect.Slice { + // The target is a slice, the caller is responsible for ensuring that + // size is correct. + return sliceOrPtr, nil + } + + slicePtrType := sliceOrPtrType + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + reflect.ValueOf(sliceOrPtr).Elem().Set(slice) + if !sliceElemIsPointer { + return slice.Interface(), nil + } + sliceElemType = sliceElemType.Elem() + + for i := 0; i < possibleCPUs; i++ { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + } + + return slice.Interface(), nil +} +func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error { keyPtr, err := m.marshalKey(key) if err != nil { return fmt.Errorf("can't marshal key: %w", err) @@ -606,7 +954,7 @@ func (m *Map) lookupAndDelete(key, valueOut interface{}, flags MapLookupFlags) e return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) } - return m.unmarshalValue(valueOut, valueBytes) + return nil } // MapUpdateFlags controls the behaviour of the Map.Update call. @@ -633,15 +981,32 @@ func (m *Map) Put(key, value interface{}) error { } // Update changes the value of a key. 
-func (m *Map) Update(key, value interface{}, flags MapUpdateFlags) error { - keyPtr, err := m.marshalKey(key) - if err != nil { - return fmt.Errorf("can't marshal key: %w", err) +func (m *Map) Update(key, value any, flags MapUpdateFlags) error { + if m.typ.hasPerCPUValue() { + return m.updatePerCPU(key, value, flags) } valuePtr, err := m.marshalValue(value) if err != nil { - return fmt.Errorf("can't marshal value: %w", err) + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error { + valuePtr, err := marshalPerCPUValue(value, int(m.valueSize)) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("marshal key: %w", err) } attr := sys.MapUpdateElemAttr{ @@ -684,13 +1049,13 @@ func (m *Map) Delete(key interface{}) error { // // Returns ErrKeyNotExist if there is no next key. func (m *Map) NextKey(key, nextKeyOut interface{}) error { - nextKeyPtr, nextKeyBytes := makeBuffer(nextKeyOut, int(m.keySize)) + nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) - if err := m.nextKey(key, nextKeyPtr); err != nil { + if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil { return err } - if err := m.unmarshalKey(nextKeyOut, nextKeyBytes); err != nil { + if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil { return fmt.Errorf("can't unmarshal next key: %w", err) } return nil @@ -705,7 +1070,7 @@ func (m *Map) NextKey(key, nextKeyOut interface{}) error { // Returns nil if there are no more keys. func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { nextKey := make([]byte, m.keySize) - nextKeyPtr := sys.NewSlicePointer(nextKey) + nextKeyPtr := sys.UnsafeSlicePointer(nextKey) err := m.nextKey(key, nextKeyPtr) if errors.Is(err, ErrKeyNotExist) { @@ -737,15 +1102,15 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { if err = sys.MapGetNextKey(&attr); err != nil { // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the // first map element when a nil key pointer is specified. - if key == nil && errors.Is(err, unix.EFAULT) { - var guessKey sys.Pointer + if platform.IsLinux && key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte guessKey, err = m.guessNonExistentKey() if err != nil { - return fmt.Errorf("can't guess starting key: %w", err) + return err } // Retry the syscall with a valid non-existing key. - attr.Key = guessKey + attr.Key = sys.UnsafeSlicePointer(guessKey) if err = sys.MapGetNextKey(&attr); err == nil { return nil } @@ -757,12 +1122,22 @@ func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { return nil } +var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) { + return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED) +}) + // guessNonExistentKey attempts to perform a map lookup that returns ENOENT. // This is necessary on kernels before 4.4.132, since those don't support // iterating maps from the start by providing an invalid key pointer. -func (m *Map) guessNonExistentKey() (startKey sys.Pointer, err error) { - // Provide an invalid value pointer to prevent a copy on the kernel side. 
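The per-CPU branch of Update expects a slice with at most one element per possible CPU; appendPerCPUSlice zero-fills any missing trailing elements. A sketch that clears every CPU's slot for a single key:

```go
func zeroPerCPU(m *ebpf.Map, key uint32) error {
	cpus, err := ebpf.PossibleCPU()
	if err != nil {
		return err
	}
	values := make([]uint64, cpus) // one zero value per possible CPU
	return m.Update(key, values, ebpf.UpdateAny)
}
```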
- valuePtr := sys.NewPointer(unsafe.Pointer(^uintptr(0))) +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Map a protected page and use that as the value pointer. This saves some + // work copying out the value, which we're not interested in. + page, err := mmapProtectedPage() + if err != nil { + return nil, err + } + valuePtr := sys.UnsafeSlicePointer(page) + randKey := make([]byte, int(m.keySize)) for i := 0; i < 4; i++ { @@ -792,25 +1167,34 @@ func (m *Map) guessNonExistentKey() (startKey sys.Pointer, err error) { err := m.lookup(randKey, valuePtr, 0) if errors.Is(err, ErrKeyNotExist) { - return sys.NewSlicePointer(randKey), nil + return randKey, nil } } - return sys.Pointer{}, errFirstKeyNotFound + return nil, errors.New("couldn't find non-existing key") } // BatchLookup looks up many elements in a map at once. // // "keysOut" and "valuesOut" must be of type slice, a pointer // to a slice or buffer will not work. -// "prevKey" is the key to start the batch lookup from, it will -// *not* be included in the results. Use nil to start at the first key. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. // // ErrKeyNotExist is returned when the batch lookup has reached // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". -func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) +func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup: %w", err) + } + return n, nil } // BatchLookupAndDelete looks up many elements in a map at once, @@ -818,47 +1202,119 @@ func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, o // It then deletes all those elements. // "keysOut" and "valuesOut" must be of type slice, a pointer // to a slice or buffer will not work. -// "prevKey" is the key to start the batch lookup from, it will -// *not* be included in the results. Use nil to start at the first key. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. // // ErrKeyNotExist is returned when the batch lookup has reached // the end of all possible results, even when partial results // are returned. It should be used to evaluate when lookup is "done". 
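The cursor-based signature replaces the old prevKey/nextKeyOut pair. A sketch of draining a non-per-CPU hash map in chunks; ErrKeyNotExist marks the end of the map even when the final chunk still returned entries, and the kernel must support batch operations (roughly 5.6 and later):

```go
func drain(m *ebpf.Map) error {
	var (
		cursor ebpf.MapBatchCursor
		keys   = make([]uint32, 256)
		values = make([]uint64, 256)
	)
	for {
		n, err := m.BatchLookup(&cursor, keys, values, nil)
		for i := 0; i < n; i++ {
			_ = keys[i] // process keys[i] / values[i]
		}
		if errors.Is(err, ebpf.ErrKeyNotExist) {
			return nil // reached the end of the map
		}
		if err != nil {
			return err
		}
	}
}
```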
-func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts) +func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup and delete: %w", err) + } + return n, nil } -func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { +// MapBatchCursor represents a starting point for a batch operation. +type MapBatchCursor struct { + m *Map + opaque []byte +} + +func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts) + } + + count, err := batchCount(keysOut, valuesOut) + if err != nil { return 0, err } - if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if errors.Is(sysErr, unix.ENOSPC) { + // Hash tables return ENOSPC when the size of the batch is smaller than + // any bucket. + return n, fmt.Errorf("%w (batch size too small?)", sysErr) + } else if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + err = valueBuf.Unmarshal(valuesOut) + if err != nil { + return 0, err + } + + return n, sysErr +} + +func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keysOut) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr } - keysValue := reflect.ValueOf(keysOut) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") + + if bytesBuf := valueBuf.Bytes(); bytesBuf != nil { + err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), bytesBuf) + if err != nil { + return 0, err + } } - valuesValue := reflect.ValueOf(valuesOut) - if valuesValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("valuesOut must be a slice") + + return n, sysErr +} + +func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + // * generic_map_lookup_batch requires that batch_out is key_size bytes. + // This is used by array and LPM maps. + // + // * __htab_map_lookup_and_delete_batch requires u32. This is used by the + // various hash maps. + // + // Use a minimum of 4 bytes to avoid having to distinguish between the two. + cursorLen := max(int(m.keySize), 4) + + inBatch := cursor.opaque + if inBatch == nil { + // This is the first lookup, allocate a buffer to hold the cursor. + cursor.opaque = make([]byte, cursorLen) + cursor.m = m + } else if cursor.m != m { + // Prevent reuse of a cursor across maps. First, it's unlikely to work. 
+ // Second, the maps may require different cursorLen and cursor.opaque + // may therefore be too short. This could lead to the kernel clobbering + // user space memory. + return 0, errors.New("a cursor may not be reused across maps") } - count := keysValue.Len() - if count != valuesValue.Len() { - return 0, fmt.Errorf("keysOut and valuesOut must be the same length") + + if err := haveBatchAPI(); err != nil { + return 0, err } - keyBuf := make([]byte, count*int(m.keySize)) - keyPtr := sys.NewSlicePointer(keyBuf) - valueBuf := make([]byte, count*int(m.fullValueSize)) - valuePtr := sys.NewSlicePointer(valueBuf) - nextPtr, nextBuf := makeBuffer(nextKeyOut, int(m.keySize)) + + keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize)) attr := sys.MapLookupBatchAttr{ MapFd: m.fd.Uint(), - Keys: keyPtr, + Keys: keyBuf.Pointer(), Values: valuePtr, Count: uint32(count), - OutBatch: nextPtr, + InBatch: sys.UnsafeSlicePointer(inBatch), + OutBatch: sys.UnsafeSlicePointer(cursor.opaque), } if opts != nil { @@ -866,30 +1322,13 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut attr.Flags = opts.Flags } - var err error - if startKey != nil { - attr.InBatch, err = marshalPtr(startKey, int(m.keySize)) - if err != nil { - return 0, err - } - } - _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) sysErr = wrapMapError(sysErr) if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { return 0, sysErr } - err = m.unmarshalKey(nextKeyOut, nextBuf) - if err != nil { - return 0, err - } - err = unmarshalBytes(keysOut, keyBuf) - if err != nil { - return 0, err - } - err = unmarshalBytes(valuesOut, valueBuf) - if err != nil { + if err := keyBuf.Unmarshal(keysOut); err != nil { return 0, err } @@ -901,33 +1340,25 @@ func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut // "keys" and "values" must be of type slice, a pointer // to a slice or buffer will not work. 
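For symmetry with the lookup example, a sketch of the write side: BatchUpdate takes parallel key and value slices (checked by batchCount below) and BatchDelete takes only keys. The feature probe now runs after a failed syscall rather than up front.

```go
func batchWrite(m *ebpf.Map) error {
	keys := []uint32{1, 2, 3}
	values := []uint64{10, 20, 30} // must be the same length as keys

	if _, err := m.BatchUpdate(keys, values, nil); err != nil {
		return err // includes the haveBatchAPI error on older kernels
	}
	_, err := m.BatchDelete(keys, nil)
	return err
}
```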
func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { - return 0, err - } if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported - } - keysValue := reflect.ValueOf(keys) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") + return m.batchUpdatePerCPU(keys, values, opts) } - valuesValue := reflect.ValueOf(values) - if valuesValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("values must be a slice") - } - var ( - count = keysValue.Len() - valuePtr sys.Pointer - err error - ) - if count != valuesValue.Len() { - return 0, fmt.Errorf("keys and values must be the same length") + + count, err := batchCount(keys, values) + if err != nil { + return 0, err } - keyPtr, err := marshalPtr(keys, count*int(m.keySize)) + + valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize)) if err != nil { return 0, err } - valuePtr, err = marshalPtr(values, count*int(m.valueSize)) + + return m.batchUpdate(count, keys, valuePtr, opts) +} + +func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) if err != nil { return 0, err } @@ -945,27 +1376,38 @@ func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, er err = sys.MapUpdateBatch(&attr) if err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err)) } return int(attr.Count), nil } +func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, sys.UnsafeSlicePointer(valueBuf), opts) +} + // BatchDelete batch deletes entries in the map by keys. // "keys" must be of type slice, a pointer to a slice or buffer will not work. func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { - if err := haveBatchAPI(); err != nil { - return 0, err - } - if m.typ.hasPerCPUValue() { - return 0, ErrNotSupported - } - keysValue := reflect.ValueOf(keys) - if keysValue.Kind() != reflect.Slice { - return 0, fmt.Errorf("keys must be a slice") + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) } - count := keysValue.Len() - keyPtr, err := marshalPtr(keys, count*int(m.keySize)) + + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) if err != nil { return 0, fmt.Errorf("cannot marshal keys: %v", err) } @@ -982,12 +1424,33 @@ func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { } if err = sys.MapDeleteBatch(&attr); err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) } return int(attr.Count), nil } +func batchCount(keys, values any) (int, error) { + keysLen, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valuesLen, err := sliceLen(values) + if err != nil { + return 0, fmt.Errorf("values: %w", err) + } + + if keysLen != valuesLen { + return 0, fmt.Errorf("keys and values must have the same length") + } + + return keysLen, nil +} + // Iterate traverses a map. 
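A usage sketch of the iterator whose internals change below; Err reports whether the loop stopped early (Err is part of the existing MapIterator API, not of this hunk).

```go
func dump(m *ebpf.Map) error {
	var (
		key   uint32
		value uint64
	)
	iter := m.Iterate()
	for iter.Next(&key, &value) {
		fmt.Printf("%d -> %d\n", key, value)
	}
	return iter.Err() // non-nil if iteration terminated abnormally
}
```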
// // It's safe to create multiple iterators at the same time. @@ -998,7 +1461,8 @@ func (m *Map) Iterate() *MapIterator { return newMapIterator(m) } -// Close removes a Map +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. func (m *Map) Close() error { if m == nil { // This makes it easier to clean up when iterating maps @@ -1043,6 +1507,7 @@ func (m *Map) Clone() (*Map, error) { m.flags, "", m.fullValueSize, + nil, }, nil } @@ -1053,9 +1518,10 @@ func (m *Map) Clone() (*Map, error) { // the new path already exists. Re-pinning across filesystems is not supported. // You can Clone a map to pin it to a different path. // -// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd func (m *Map) Pin(fileName string) error { - if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil { + if err := sys.Pin(m.pinnedPath, fileName, m.fd); err != nil { return err } m.pinnedPath = fileName @@ -1068,7 +1534,7 @@ func (m *Map) Pin(fileName string) error { // // Unpinning an unpinned Map returns nil. func (m *Map) Unpin() error { - if err := internal.Unpin(m.pinnedPath); err != nil { + if err := sys.Unpin(m.pinnedPath); err != nil { return err } m.pinnedPath = "" @@ -1084,15 +1550,14 @@ func (m *Map) IsPinned() bool { // // It makes no changes to kernel-side restrictions. func (m *Map) Freeze() error { - if err := haveMapMutabilityModifiers(); err != nil { - return fmt.Errorf("can't freeze map: %w", err) - } - attr := sys.MapFreezeAttr{ MapFd: m.fd.Uint(), } if err := sys.MapFreeze(&attr); err != nil { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("can't freeze map: %w", haveFeatErr) + } return fmt.Errorf("can't freeze map: %w", err) } return nil @@ -1107,7 +1572,7 @@ func (m *Map) finalize(spec *MapSpec) error { } } - if spec.Freeze { + if isConstantDataSection(spec.Name) || isKconfigSection(spec.Name) { if err := m.Freeze(); err != nil { return fmt.Errorf("freezing map: %w", err) } @@ -1120,28 +1585,15 @@ func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { if data == nil { if m.keySize == 0 { // Queues have a key length of zero, so passing nil here is valid. - return sys.NewPointer(nil), nil + return sys.UnsafePointer(nil), nil } return sys.Pointer{}, errors.New("can't use nil as key of map") } - return marshalPtr(data, int(m.keySize)) -} - -func (m *Map) unmarshalKey(data interface{}, buf []byte) error { - if buf == nil { - // This is from a makeBuffer call, nothing do do here. 
- return nil - } - - return unmarshalBytes(data, buf) + return marshalMapSyscallInput(data, int(m.keySize)) } func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { - if m.typ.hasPerCPUValue() { - return marshalPerCPUValue(data, int(m.valueSize)) - } - var ( buf []byte err error @@ -1161,26 +1613,17 @@ func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { buf, err = marshalProgram(value, int(m.valueSize)) default: - return marshalPtr(data, int(m.valueSize)) + return marshalMapSyscallInput(data, int(m.valueSize)) } if err != nil { return sys.Pointer{}, err } - return sys.NewSlicePointer(buf), nil + return sys.UnsafeSlicePointer(buf), nil } -func (m *Map) unmarshalValue(value interface{}, buf []byte) error { - if buf == nil { - // This is from a makeBuffer call, nothing do do here. - return nil - } - - if m.typ.hasPerCPUValue() { - return unmarshalPerCPUValue(value, int(m.valueSize), buf) - } - +func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { switch value := value.(type) { case **Map: if !m.typ.canStoreMap() { @@ -1227,12 +1670,14 @@ func (m *Map) unmarshalValue(value interface{}, buf []byte) error { return errors.New("require pointer to *Program") } - return unmarshalBytes(value, buf) + return buf.Unmarshal(value) } -// LoadPinnedMap loads a Map from a BPF file. +// LoadPinnedMap opens a Map from a pin (file) on the BPF virtual filesystem. +// +// Requires at least Linux 4.5. func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { - fd, err := sys.ObjGet(&sys.ObjGetAttr{ + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ Pathname: sys.NewStringPointer(fileName), FileFlags: opts.Marshal(), }) @@ -1240,6 +1685,11 @@ func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { return nil, err } + if typ != sys.BPF_TYPE_MAP { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Map", fileName) + } + m, err := newMapFromFD(fd) if err == nil { m.pinnedPath = fileName @@ -1249,17 +1699,20 @@ func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { } // unmarshalMap creates a map from a map ID encoded in host endianness. -func unmarshalMap(buf []byte) (*Map, error) { - if len(buf) != 4 { - return nil, errors.New("map id requires 4 byte value") +func unmarshalMap(buf sysenc.Buffer) (*Map, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err } - - id := internal.NativeEndian.Uint32(buf) return NewMapFromID(MapID(id)) } // marshalMap marshals the fd of a map into a buffer in host endianness. 
func marshalMap(m *Map, length int) ([]byte, error) { + if m == nil { + return nil, errors.New("can't marshal a nil Map") + } + if length != 4 { return nil, fmt.Errorf("can't marshal map to %d bytes", length) } @@ -1269,67 +1722,14 @@ func marshalMap(m *Map, length int) ([]byte, error) { return buf, nil } -func patchValue(value []byte, typ btf.Type, replacements map[string]interface{}) error { - replaced := make(map[string]bool) - replace := func(name string, offset, size int, replacement interface{}) error { - if offset+size > len(value) { - return fmt.Errorf("%s: offset %d(+%d) is out of bounds", name, offset, size) - } - - buf, err := marshalBytes(replacement, size) - if err != nil { - return fmt.Errorf("marshal %s: %w", name, err) - } - - copy(value[offset:offset+size], buf) - replaced[name] = true - return nil - } - - switch parent := typ.(type) { - case *btf.Datasec: - for _, secinfo := range parent.Vars { - name := string(secinfo.Type.(*btf.Var).Name) - replacement, ok := replacements[name] - if !ok { - continue - } - - err := replace(name, int(secinfo.Offset), int(secinfo.Size), replacement) - if err != nil { - return err - } - } - - default: - return fmt.Errorf("patching %T is not supported", typ) - } - - if len(replaced) == len(replacements) { - return nil - } - - var missing []string - for name := range replacements { - if !replaced[name] { - missing = append(missing, name) - } - } - - if len(missing) == 1 { - return fmt.Errorf("unknown field: %s", missing[0]) - } - - return fmt.Errorf("unknown fields: %s", strings.Join(missing, ",")) -} - // MapIterator iterates a Map. // // See Map.Iterate. type MapIterator struct { - target *Map - prevKey interface{} - prevBytes []byte + target *Map + // Temporary storage to avoid allocations in Next(). This is any instead + // of []byte to avoid allocations. + cursor any count, maxEntries uint32 done bool err error @@ -1339,7 +1739,6 @@ func newMapIterator(target *Map) *MapIterator { return &MapIterator{ target: target, maxEntries: target.maxEntries, - prevBytes: make([]byte, target.keySize), } } @@ -1358,29 +1757,30 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { return false } - // For array-like maps NextKeyBytes returns nil only on after maxEntries + // For array-like maps NextKey returns nil only after maxEntries // iterations. for mi.count <= mi.maxEntries { - var nextBytes []byte - nextBytes, mi.err = mi.target.NextKeyBytes(mi.prevKey) - if mi.err != nil { - return false + if mi.cursor == nil { + // Pass nil interface to NextKey to make sure the Map's first key + // is returned. If we pass an uninitialized []byte instead, it'll see a + // non-nil interface and try to marshal it. + mi.cursor = make([]byte, mi.target.keySize) + mi.err = mi.target.NextKey(nil, mi.cursor) + } else { + mi.err = mi.target.NextKey(mi.cursor, mi.cursor) } - if nextBytes == nil { + if errors.Is(mi.err, ErrKeyNotExist) { mi.done = true + mi.err = nil + return false + } else if mi.err != nil { + mi.err = fmt.Errorf("get next key: %w", mi.err) return false } - // The user can get access to nextBytes since unmarshalBytes - // does not copy when unmarshaling into a []byte. - // Make a copy to prevent accidental corruption of - // iterator state. - copy(mi.prevBytes, nextBytes) - mi.prevKey = mi.prevBytes - mi.count++ - mi.err = mi.target.Lookup(nextBytes, valueOut) + mi.err = mi.target.Lookup(mi.cursor, valueOut) if errors.Is(mi.err, ErrKeyNotExist) { // Even though the key should be valid, we couldn't look up // its value. 
If we're iterating a hash map this is probably @@ -1393,10 +1793,17 @@ func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { continue } if mi.err != nil { + mi.err = fmt.Errorf("look up next key: %w", mi.err) return false } - mi.err = mi.target.unmarshalKey(keyOut, nextBytes) + buf := mi.cursor.([]byte) + if ptr, ok := keyOut.(unsafe.Pointer); ok { + copy(unsafe.Slice((*byte)(ptr), len(buf)), buf) + } else { + mi.err = sysenc.Unmarshal(keyOut, buf) + } + return mi.err == nil } @@ -1421,9 +1828,10 @@ func MapGetNextID(startID MapID) (MapID, error) { return MapID(attr.NextId), sys.MapGetNextId(attr) } -// NewMapFromID returns the map for a given id. +// NewMapFromID returns the [Map] for a given map id. Returns [ErrNotExist] if +// there is no eBPF map with the given id. // -// Returns ErrNotExist, if there is no eBPF map with the given id. +// Requires at least Linux 4.13. func NewMapFromID(id MapID) (*Map, error) { fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ Id: uint32(id), @@ -1435,13 +1843,11 @@ func NewMapFromID(id MapID) (*Map, error) { return newMapFromFD(fd) } -// ID returns the systemwide unique ID of the map. -// -// Deprecated: use MapInfo.ID() instead. -func (m *Map) ID() (MapID, error) { - var info sys.MapInfo - if err := sys.ObjInfo(m.fd, &info); err != nil { - return MapID(0), err +// sliceLen returns the length if the value is a slice or an error otherwise. +func sliceLen(slice any) (int, error) { + sliceValue := reflect.ValueOf(slice) + if sliceValue.Kind() != reflect.Slice { + return 0, fmt.Errorf("%T is not a slice", slice) } - return MapID(info.Id), nil + return sliceValue.Len(), nil } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/marshalers.go b/src/nvcgo/vendor/github.com/cilium/ebpf/marshalers.go index 4351cc57f..d4e719c60 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/marshalers.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/marshalers.go @@ -1,254 +1,210 @@ package ebpf import ( - "bytes" "encoding" - "encoding/binary" "errors" "fmt" "reflect" - "runtime" - "sync" + "slices" "unsafe" "github.com/cilium/ebpf/internal" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" ) -// marshalPtr converts an arbitrary value into a pointer suitable +// marshalMapSyscallInput converts an arbitrary value into a pointer suitable // to be passed to the kernel. // // As an optimization, it returns the original value if it is an // unsafe.Pointer. -func marshalPtr(data interface{}, length int) (sys.Pointer, error) { +func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) { if ptr, ok := data.(unsafe.Pointer); ok { - return sys.NewPointer(ptr), nil + return sys.UnsafePointer(ptr), nil } - buf, err := marshalBytes(data, length) + buf, err := sysenc.Marshal(data, length) if err != nil { return sys.Pointer{}, err } - return sys.NewSlicePointer(buf), nil + return buf.Pointer(), nil } -// marshalBytes converts an arbitrary value into a byte buffer. -// -// Prefer using Map.marshalKey and Map.marshalValue if possible, since -// those have special cases that allow more types to be encoded. -// -// Returns an error if the given value isn't representable in exactly -// length bytes. 
-func marshalBytes(data interface{}, length int) (buf []byte, err error) { - if data == nil { - return nil, errors.New("can't marshal a nil value") - } - - switch value := data.(type) { - case encoding.BinaryMarshaler: - buf, err = value.MarshalBinary() - case string: - buf = []byte(value) - case []byte: - buf = value - case unsafe.Pointer: - err = errors.New("can't marshal from unsafe.Pointer") - case Map, *Map, Program, *Program: - err = fmt.Errorf("can't marshal %T", value) - default: - var wr bytes.Buffer - err = binary.Write(&wr, internal.NativeEndian, value) - if err != nil { - err = fmt.Errorf("encoding %T: %v", value, err) - } - buf = wr.Bytes() - } - if err != nil { - return nil, err +func makeMapSyscallOutput(dst any, length int) sysenc.Buffer { + if ptr, ok := dst.(unsafe.Pointer); ok { + return sysenc.UnsafeBuffer(ptr) } - if len(buf) != length { - return nil, fmt.Errorf("%T doesn't marshal to %d bytes", data, length) + _, ok := dst.(encoding.BinaryUnmarshaler) + if ok { + return sysenc.SyscallOutput(nil, length) } - return buf, nil + + return sysenc.SyscallOutput(dst, length) } -func makeBuffer(dst interface{}, length int) (sys.Pointer, []byte) { - if ptr, ok := dst.(unsafe.Pointer); ok { - return sys.NewPointer(ptr), nil +// appendPerCPUSlice encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, errors.New("per-CPU value requires slice") } - buf := make([]byte, length) - return sys.NewSlicePointer(buf), buf -} - -var bytesReaderPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Reader) - }, -} + sliceValue := reflect.ValueOf(slice) + sliceLen := sliceValue.Len() + if sliceLen > possibleCPUs { + return nil, fmt.Errorf("per-CPU value greater than number of CPUs") + } -// unmarshalBytes converts a byte buffer into an arbitrary value. -// -// Prefer using Map.unmarshalKey and Map.unmarshalValue if possible, since -// those have special cases that allow more types to be encoded. -// -// The common int32 and int64 types are directly handled to avoid -// unnecessary heap allocations as happening in the default case. 
-func unmarshalBytes(data interface{}, buf []byte) error { - switch value := data.(type) { - case unsafe.Pointer: - var dst []byte - // Use unsafe.Slice when we drop support for pre1.17 (https://github.com/golang/go/issues/19367) - // We could opt for removing unsafe.Pointer support in the lib as well - sh := (*reflect.SliceHeader)(unsafe.Pointer(&dst)) - sh.Data = uintptr(value) - sh.Len = len(buf) - sh.Cap = len(buf) - - copy(dst, buf) - runtime.KeepAlive(value) - return nil - case Map, *Map, Program, *Program: - return fmt.Errorf("can't unmarshal into %T", value) - case encoding.BinaryUnmarshaler: - return value.UnmarshalBinary(buf) - case *string: - *value = string(buf) - return nil - case *[]byte: - *value = buf - return nil - case *int32: - if len(buf) < 4 { - return errors.New("int32 requires 4 bytes") - } - *value = int32(internal.NativeEndian.Uint32(buf)) - return nil - case *uint32: - if len(buf) < 4 { - return errors.New("uint32 requires 4 bytes") - } - *value = internal.NativeEndian.Uint32(buf) - return nil - case *int64: - if len(buf) < 8 { - return errors.New("int64 requires 8 bytes") - } - *value = int64(internal.NativeEndian.Uint64(buf)) - return nil - case *uint64: - if len(buf) < 8 { - return errors.New("uint64 requires 8 bytes") - } - *value = internal.NativeEndian.Uint64(buf) - return nil - case string: - return errors.New("require pointer to string") - case []byte: - return errors.New("require pointer to []byte") - default: - rd := bytesReaderPool.Get().(*bytes.Reader) - rd.Reset(buf) - defer bytesReaderPool.Put(rd) - if err := binary.Read(rd, internal.NativeEndian, value); err != nil { - return fmt.Errorf("decoding %T: %v", value, err) + // Grow increases the slice's capacity, _if_necessary_ + buf = slices.Grow(buf, alignedElemLength*possibleCPUs) + for i := 0; i < sliceLen; i++ { + elem := sliceValue.Index(i).Interface() + elemBytes, err := sysenc.Marshal(elem, elemLength) + if err != nil { + return nil, err } - return nil + + buf = elemBytes.AppendTo(buf) + buf = append(buf, make([]byte, alignedElemLength-elemLength)...) } + + // Ensure buf is zero-padded full size. + buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...) + + return buf, nil } // marshalPerCPUValue encodes a slice containing one value per // possible CPU into a buffer of bytes. // // Values are initialized to zero if the slice has less elements than CPUs. -// -// slice must have a type like []elementType. -func marshalPerCPUValue(slice interface{}, elemLength int) (sys.Pointer, error) { - sliceType := reflect.TypeOf(slice) - if sliceType.Kind() != reflect.Slice { - return sys.Pointer{}, errors.New("per-CPU value requires slice") +func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { + possibleCPUs, err := PossibleCPU() + if err != nil { + return sys.Pointer{}, err } - possibleCPUs, err := internal.PossibleCPUs() + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, alignedElemLength*possibleCPUs) + buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength) if err != nil { return sys.Pointer{}, err } - sliceValue := reflect.ValueOf(slice) - sliceLen := sliceValue.Len() - if sliceLen > possibleCPUs { - return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs") + return sys.UnsafeSlicePointer(buf), nil +} + +// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing +// one value per possible CPU into a buffer of bytes. 
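These helpers fix the buffer layout used when batch operations meet per-CPU maps: the values slice must hold len(keys) * PossibleCPU() elements, grouped per key. A sketch of the expected shapes, with perCPUHash standing in for any per-CPU map with 8-byte values:

```go
func perCPUBatch(perCPUHash *ebpf.Map) error {
	cpus, err := ebpf.PossibleCPU()
	if err != nil {
		return err
	}
	var (
		cursor ebpf.MapBatchCursor
		keys   = make([]uint32, 64)
		values = make([]uint64, 64*cpus) // one per-CPU group per key
	)
	n, err := perCPUHash.BatchLookup(&cursor, keys, values, nil)
	if err != nil && !errors.Is(err, ebpf.ErrKeyNotExist) {
		return err
	}
	// values[i*cpus:(i+1)*cpus] holds the per-CPU values of keys[i], i < n.
	_ = n
	return nil
}
```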
+func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, fmt.Errorf("batch value requires a slice") } + sliceValue := reflect.ValueOf(slice) + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + batchLen*possibleCPUs, sliceValue.Len()) + } alignedElemLength := internal.Align(elemLength, 8) - buf := make([]byte, alignedElemLength*possibleCPUs) - - for i := 0; i < sliceLen; i++ { - elem := sliceValue.Index(i).Interface() - elemBytes, err := marshalBytes(elem, elemLength) + buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs) + for i := 0; i < batchLen; i++ { + batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength) if err != nil { - return sys.Pointer{}, err + return nil, fmt.Errorf("batch %d: %w", i, err) } - - offset := i * alignedElemLength - copy(buf[offset:offset+elemLength], elemBytes) } - - return sys.NewSlicePointer(buf), nil + return buf, nil } // unmarshalPerCPUValue decodes a buffer into a slice containing one value per // possible CPU. // -// valueOut must have a type like *[]elementType -func unmarshalPerCPUValue(slicePtr interface{}, elemLength int, buf []byte) error { - slicePtrType := reflect.TypeOf(slicePtr) - if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { - return fmt.Errorf("per-cpu value requires pointer to slice") +// slice must be a literal slice and not a pointer. +func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("per-CPU value requires a slice") } - possibleCPUs, err := internal.PossibleCPUs() + possibleCPUs, err := PossibleCPU() if err != nil { return err } - sliceType := slicePtrType.Elem() - slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + sliceValue := reflect.ValueOf(slice) + if sliceValue.Len() != possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + possibleCPUs, sliceValue.Len()) + } sliceElemType := sliceType.Elem() sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr - if sliceElemIsPointer { - sliceElemType = sliceElemType.Elem() - } - - step := len(buf) / possibleCPUs - if step < elemLength { - return fmt.Errorf("per-cpu element length is larger than available data") - } + stride := internal.Align(elemLength, 8) for i := 0; i < possibleCPUs; i++ { - var elem interface{} + var elem any + v := sliceValue.Index(i) if sliceElemIsPointer { - newElem := reflect.New(sliceElemType) - slice.Index(i).Set(newElem) - elem = newElem.Interface() + if !v.Elem().CanAddr() { + return fmt.Errorf("per-CPU slice elements cannot be nil") + } + elem = v.Elem().Addr().Interface() } else { - elem = slice.Index(i).Addr().Interface() + elem = v.Addr().Interface() } - - // Make a copy, since unmarshal can hold on to itemBytes - elemBytes := make([]byte, elemLength) - copy(elemBytes, buf[:elemLength]) - - err := unmarshalBytes(elem, elemBytes) + err := sysenc.Unmarshal(elem, buf[:elemLength]) if err != nil { return fmt.Errorf("cpu %d: %w", i, err) } - buf = buf[step:] + buf = buf[stride:] + } + return nil +} + +// unmarshalBatchPerCPUValue decodes a buffer into a 
batch-sized slice +// containing one value per possible CPU. +// +// slice must have length batchLen * PossibleCPUs(). +func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("batch requires a slice") } - reflect.ValueOf(slicePtr).Elem().Set(slice) + sliceValue := reflect.ValueOf(slice) + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + sliceValue.Len(), batchLen*possibleCPUs) + } + + fullValueSize := possibleCPUs * internal.Align(elemLength, 8) + if len(buf) != batchLen*fullValueSize { + return fmt.Errorf("input buffer has incorrect length, expected %d, got %d", + len(buf), batchLen*fullValueSize) + } + + for i := 0; i < batchLen; i++ { + elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil { + return fmt.Errorf("batch %d: %w", i, err) + } + buf = buf[fullValueSize:] + } return nil } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/memory.go b/src/nvcgo/vendor/github.com/cilium/ebpf/memory.go new file mode 100644 index 000000000..e470bf24f --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/memory.go @@ -0,0 +1,155 @@ +package ebpf + +import ( + "errors" + "fmt" + "io" + "runtime" + + "github.com/cilium/ebpf/internal/unix" +) + +// Memory is the building block for accessing the memory of specific bpf map +// types (Array and Arena at the time of writing) without going through the bpf +// syscall interface. +// +// Given the fd of a bpf map created with the BPF_F_MMAPABLE flag, a shared +// 'file'-based memory-mapped region can be allocated in the process' address +// space, exposing the bpf map's memory by simply accessing a memory location. + +var ErrReadOnly = errors.New("resource is read-only") + +// Memory implements accessing a Map's memory without making any syscalls. +// Pay attention to the difference between Go and C struct alignment rules. Use +// [structs.HostLayout] on supported Go versions to help with alignment. +// +// Note on memory coherence: avoid using packed structs in memory shared between +// user space and eBPF C programs. This drops a struct's memory alignment to 1, +// forcing the compiler to use single-byte loads and stores for field accesses. +// This may lead to partially-written data to be observed from user space. +// +// On most architectures, the memmove implementation used by Go's copy() will +// access data in word-sized chunks. If paired with a matching access pattern on +// the eBPF C side (and if using default memory alignment), accessing shared +// memory without atomics or other synchronization primitives should be sound +// for individual values. For accesses beyond a single value, the usual +// concurrent programming rules apply. +type Memory struct { + b []byte + ro bool + heap bool + + cleanup runtime.Cleanup +} + +func newMemory(fd, size int) (*Memory, error) { + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. 
+ // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + b, err := unix.Mmap(fd, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + b, err = unix.Mmap(fd, 0, size, unix.PROT_READ, unix.MAP_SHARED) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{b: b, ro: ro, heap: false} + mm.cleanup = runtime.AddCleanup(mm, memoryCleanupFunc(), b) + + return mm, nil +} + +func memoryCleanupFunc() func([]byte) { + return func(b []byte) { + if err := unix.Munmap(b); err != nil { + panic(fmt.Errorf("unmapping memory: %w", err)) + } + } +} + +func (mm *Memory) close() { + mm.cleanup.Stop() + memoryCleanupFunc()(mm.b) + mm.b = nil +} + +// Size returns the size of the memory-mapped region in bytes. +func (mm *Memory) Size() int { + return len(mm.b) +} + +// ReadOnly returns true if the memory-mapped region is read-only. +func (mm *Memory) ReadOnly() bool { + return mm.ro +} + +// bounds returns true if an access at off of the given size is within bounds. +func (mm *Memory) bounds(off uint64, size uint64) bool { + if off+size < off { + return false + } + return off+size <= uint64(len(mm.b)) +} + +// ReadAt implements [io.ReaderAt]. Useful for creating a new [io.OffsetWriter]. +// +// See [Memory] for details around memory coherence. +func (mm *Memory) ReadAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + + if p == nil { + return 0, fmt.Errorf("input buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("read offset out of range") + } + + n := copy(p, mm.b[off:]) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} + +// WriteAt implements [io.WriterAt]. Useful for creating a new +// [io.SectionReader]. +// +// See [Memory] for details around memory coherence. +func (mm *Memory) WriteAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + if mm.ro { + return 0, fmt.Errorf("memory-mapped region not writable: %w", ErrReadOnly) + } + + if p == nil { + return 0, fmt.Errorf("output buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("write offset out of range") + } + + n := copy(mm.b[off:], p) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/memory_unsafe.go b/src/nvcgo/vendor/github.com/cilium/ebpf/memory_unsafe.go new file mode 100644 index 000000000..9518ff35d --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/memory_unsafe.go @@ -0,0 +1,343 @@ +package ebpf + +import ( + "errors" + "fmt" + "os" + "reflect" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// This file contains an experimental, unsafe implementation of Memory that +// allows taking a Go pointer to a memory-mapped region. This currently does not +// have first-class support from the Go runtime, so it may break in future Go +// versions. The Go proposal for the runtime to track off-heap pointers is here: +// https://github.com/golang/go/issues/70224. 
+// +// In Go, the programmer should not have to worry about freeing memory. Since +// this API synthesizes Go variables around global variables declared in a BPF +// C program, we want to lean on the runtime for making sure accessing them is +// safe at all times. Unfortunately, Go (as of 1.24) does not have the ability +// of automatically managing memory that was not allocated by the runtime. +// +// This led to a solution that requests regular Go heap memory by allocating a +// slice (making the runtime track pointers into the slice's backing array) and +// memory-mapping the bpf map's memory over it. Then, before returning the +// Memory to the caller, a finalizer is set on the backing array, making sure +// the bpf map's memory is unmapped from the heap before releasing the backing +// array to the runtime for reallocation. +// +// This obviates the need to maintain a reference to the *Memory at all times, +// which is difficult for the caller to achieve if the variable access is done +// through another object (like a sync.Atomic) that can potentially be passed +// around the Go application. Accidentally losing the reference to the *Memory +// would result in hard-to-debug segfaults, which are always unexpected in Go. + +//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove +func heapObjectsCanMove() bool + +// Set from a file behind the ebpf_unsafe_memory_experiment build tag to enable +// features that require mapping bpf map memory over the Go heap. +var unsafeMemory = false + +// ErrInvalidType is returned when the given type cannot be used as a Memory or +// Variable pointer. +var ErrInvalidType = errors.New("invalid type") + +func newUnsafeMemory(fd, size int) (*Memory, error) { + // Some architectures need the size to be page-aligned to work with MAP_FIXED. + if size%os.Getpagesize() != 0 { + return nil, fmt.Errorf("memory: must be a multiple of page size (requested %d bytes)", size) + } + + // Allocate a page-aligned span of memory on the Go heap. + alloc, err := allocate(size) + if err != nil { + return nil, fmt.Errorf("allocating memory: %w", err) + } + + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. + // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + // + // Map the bpf map memory over a page-aligned allocation on the Go heap. + err = mapmap(fd, alloc, size, unix.PROT_READ|unix.PROT_WRITE) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + err = mapmap(fd, alloc, size, unix.PROT_READ) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{ + unsafe.Slice((*byte)(alloc), size), + ro, + true, + runtime.Cleanup{}, + } + + return mm, nil +} + +// allocate returns a pointer to a page-aligned section of memory on the Go +// heap, managed by the runtime. 
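// A quick worked example of the sizing below (assuming 4 KiB pages): a 16 KiB
// request allocates 20 KiB (size plus one page, rounded up to a page multiple).
// Aligning the backing array's start up to the next page boundary skips at most
// one page minus one byte, so a page-aligned window of the full 16 KiB always
// fits inside the allocation.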
+// +//go:nocheckptr +func allocate(size int) (unsafe.Pointer, error) { + // Memory-mapping over a piece of the Go heap is unsafe when the GC can + // randomly decide to move objects around, in which case the mapped region + // will not move along with it. + if heapObjectsCanMove() { + return nil, errors.New("this Go runtime has a moving garbage collector") + } + + if size == 0 { + return nil, errors.New("size must be greater than 0") + } + + // Request at least two pages of memory from the runtime to ensure we can + // align the requested allocation to a page boundary. This is needed for + // MAP_FIXED and makes sure we don't mmap over some other allocation on the Go + // heap. + size = internal.Align(size+os.Getpagesize(), os.Getpagesize()) + + // Allocate a new slice and store a pointer to its backing array. + alloc := unsafe.Pointer(unsafe.SliceData(make([]byte, size))) + + // nolint:govet + // + // Align the pointer to a page boundary within the allocation. This may alias + // the initial pointer if it was already page-aligned. Ignore govet warnings + // since we're calling [runtime.KeepAlive] on the original Go memory. + aligned := unsafe.Pointer(internal.Align(uintptr(alloc), uintptr(os.Getpagesize()))) + runtime.KeepAlive(alloc) + + // Return an aligned pointer into the backing array, losing the original + // reference. The runtime.SetFinalizer docs specify that its argument 'must be + // a pointer to an object, complit or local var', but this is still somewhat + // vague and not enforced by the current implementation. + // + // Currently, finalizers can be set and triggered from any address within a + // heap allocation, even individual struct fields or arbitrary offsets within + // a slice. In this case, finalizers set on struct fields or slice offsets + // will only run when the whole struct or backing array are collected. The + // accepted runtime.AddCleanup proposal makes this behaviour more explicit and + // is set to deprecate runtime.SetFinalizer. + // + // Alternatively, we'd have to track the original allocation and the aligned + // pointer separately, which severely complicates finalizer setup and makes it + // prone to human error. For now, just bump the pointer and treat it as the + // new and only reference to the backing array. + return aligned, nil +} + +// mapmap memory-maps the given file descriptor at the given address and sets a +// finalizer on addr to unmap it when it's no longer reachable. +func mapmap(fd int, addr unsafe.Pointer, size, flags int) error { + // Map the bpf map memory over the Go heap. This will result in the following + // mmap layout in the process' address space (0xc000000000 is a span of Go + // heap), visualized using pmap: + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 1824 864 864 rw--- [ anon ] + // 000000c0001c8000 4 4 4 rw-s- [ anon ] + // 000000c0001c9000 2268 16 16 rw--- [ anon ] + // + // This will break up the Go heap, but as long as the runtime doesn't try to + // move our allocation around, this is safe for as long as we hold a reference + // to our allocated object. + // + // Use MAP_SHARED to make sure the kernel sees any writes we do, and MAP_FIXED + // to ensure the mapping starts exactly at the address we requested. If alloc + // isn't page-aligned, the mapping operation will fail. 
+ if _, err := unix.MmapPtr(fd, 0, addr, uintptr(size), + flags, unix.MAP_SHARED|unix.MAP_FIXED); err != nil { + return fmt.Errorf("setting up memory-mapped region: %w", err) + } + + // Set a finalizer on the heap allocation to undo the mapping before the span + // is collected and reused by the runtime. This has a few reasons: + // + // - Avoid leaking memory/mappings. + // - Future writes to this memory should never clobber a bpf map's contents. + // - Some bpf maps are mapped read-only, causing a segfault if the runtime + // reallocates and zeroes the span later. + runtime.SetFinalizer((*byte)(addr), unmap(size)) + + return nil +} + +// unmap returns a function that takes a pointer to a memory-mapped region on +// the Go heap. The function undoes any mappings and discards the span's +// contents. +// +// Used as a finalizer in [newMemory], split off into a separate function for +// testing and to avoid accidentally closing over the unsafe.Pointer to the +// memory region, which would cause a cyclical reference. +// +// The resulting function panics if the mmap operation returns an error, since +// it would mean the integrity of the Go heap is compromised. +func unmap(size int) func(*byte) { + return func(a *byte) { + // Create another mapping at the same address to undo the original mapping. + // This will cause the kernel to repair the slab since we're using the same + // protection mode and flags as the original mapping for the Go heap. + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 4096 884 884 rw--- [ anon ] + // + // Using munmap here would leave an unmapped hole in the heap, compromising + // its integrity. + // + // MmapPtr allocates another unsafe.Pointer at the same address. Even though + // we discard it here, it may temporarily resurrect the backing array and + // delay its collection to the next GC cycle. + _, err := unix.MmapPtr(-1, 0, unsafe.Pointer(a), uintptr(size), + unix.PROT_READ|unix.PROT_WRITE, + unix.MAP_PRIVATE|unix.MAP_FIXED|unix.MAP_ANON) + if err != nil { + panic(fmt.Errorf("undoing bpf map memory mapping: %w", err)) + } + } +} + +// checkUnsafeMemory ensures value T can be accessed in mm at offset off. +// +// The comparable constraint narrows down the set of eligible types to exclude +// slices, maps and functions. These complex types cannot be mapped to memory +// directly. +func checkUnsafeMemory[T comparable](mm *Memory, off uint64) error { + if mm.b == nil { + return fmt.Errorf("memory-mapped region is nil") + } + if mm.ro { + return ErrReadOnly + } + if !mm.heap { + return fmt.Errorf("memory region is not heap-mapped, build with '-tags ebpf_unsafe_memory_experiment' to enable: %w", ErrNotSupported) + } + + t := reflect.TypeFor[T]() + if err := checkType(t.String(), t); err != nil { + return err + } + + size := t.Size() + if size == 0 { + return fmt.Errorf("zero-sized type %s: %w", t, ErrInvalidType) + } + + if off%uint64(t.Align()) != 0 { + return fmt.Errorf("unaligned access of memory-mapped region: %d-byte aligned read at offset %d", t.Align(), off) + } + + vs, bs := uint64(size), uint64(len(mm.b)) + if off+vs > bs { + return fmt.Errorf("%d-byte value at offset %d exceeds mmap size of %d bytes", vs, off, bs) + } + + return nil +} + +// checkType recursively checks if the given type is supported for memory +// mapping. Only fixed-size, non-Go-pointer types are supported: bools, floats, +// (u)int[8-64], arrays, and structs containing them. 
As an exception, uintptr +// is allowed since the backing memory is expected to contain 32-bit pointers on +// 32-bit systems despite BPF always allocating 64 bits for pointers in a data +// section. +// +// Doesn't check for loops since it rejects pointers. Should that ever change, a +// visited set would be needed. +func checkType(name string, t reflect.Type) error { + // Special-case atomic types to allow them to be used as root types as well as + // struct fields. Notably, omit atomic.Value and atomic.Pointer since those + // are pointer types. Also, atomic.Value embeds an interface value, which + // doesn't make sense to share with C land. + if t.PkgPath() == "sync/atomic" { + switch t.Name() { + case "Bool", "Int32", "Int64", "Uint32", "Uint64", "Uintptr": + return nil + } + } + + switch t.Kind() { + case reflect.Uintptr, reflect.Bool, reflect.Float32, reflect.Float64, + reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return nil + + case reflect.Array: + at := t.Elem() + if err := checkType(fmt.Sprintf("%s.%s", name, at.String()), at); err != nil { + return err + } + + case reflect.Struct: + var hasHostLayout bool + for i := range t.NumField() { + at := t.Field(i).Type + + // Require [structs.HostLayout] to be embedded in all structs. Check the + // full package path to reject a user-defined HostLayout type. + if at.PkgPath() == "structs" && at.Name() == "HostLayout" { + hasHostLayout = true + continue + } + + if err := checkType(fmt.Sprintf("%s.%s", name, at.String()), at); err != nil { + return err + } + } + + if !hasHostLayout { + return fmt.Errorf("struct %s must embed structs.HostLayout: %w", name, ErrInvalidType) + } + + default: + // For basic types like int and bool, the kind name is the same as the type + // name, so the fallthrough case would print 'int type int not supported'. + // Omit the kind name if it matches the type name. + if t.String() == t.Kind().String() { + // Output: type int not supported + return fmt.Errorf("type %s not supported: %w", name, ErrInvalidType) + } + + // Output: interface value io.Reader not supported + return fmt.Errorf("%s type %s not supported: %w", t.Kind(), name, ErrInvalidType) + } + + return nil +} + +// memoryPointer returns a pointer to a value of type T at offset off in mm. +// Taking a pointer to a read-only Memory or to a Memory that is not heap-mapped +// is not supported. +// +// T must contain only fixed-size, non-Go-pointer types: bools, floats, +// (u)int[8-64], arrays, and structs containing them. Structs must embed +// [structs.HostLayout]. [ErrInvalidType] is returned if T is not a valid type. +// +// Memory must be writable, off must be aligned to the size of T, and the value +// must be within bounds of the Memory. +// +// To access read-only memory, use [Memory.ReadAt]. 
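// A minimal sketch of a value type that satisfies the constraints described
// above (hypothetical; assumes `import "structs"` and `import "sync/atomic"`
// are available to the declaring file):
type exampleSharedStats struct {
	_        structs.HostLayout // required embedding for struct types
	Packets  atomic.Uint64      // sync/atomic integer types are allowed
	Dropped  uint32             // fixed-size integers are allowed
	PerQueue [4]uint64          // arrays of fixed-size element types are allowed
	// A pointer, slice, map, chan or string field here would be rejected
	// with ErrInvalidType by checkType above.
}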
+func memoryPointer[T comparable](mm *Memory, off uint64) (*T, error) { + if err := checkUnsafeMemory[T](mm, off); err != nil { + return nil, fmt.Errorf("memory pointer: %w", err) + } + return (*T)(unsafe.Pointer(&mm.b[off])), nil +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go b/src/nvcgo/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go new file mode 100644 index 000000000..e662065ed --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/memory_unsafe_tag.go @@ -0,0 +1,7 @@ +//go:build ebpf_unsafe_memory_experiment + +package ebpf + +func init() { + unsafeMemory = true +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/netlify.toml b/src/nvcgo/vendor/github.com/cilium/ebpf/netlify.toml new file mode 100644 index 000000000..764c3b447 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/netlify.toml @@ -0,0 +1,5 @@ +[build] + base = "docs/" + publish = "site/" + command = "mkdocs build" + environment = { PYTHON_VERSION = "3.13" } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/prog.go b/src/nvcgo/vendor/github.com/cilium/ebpf/prog.go index 523e6a54e..3e724234d 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/prog.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/prog.go @@ -5,27 +5,40 @@ import ( "encoding/binary" "errors" "fmt" - "io" "math" "path/filepath" + "runtime" + "slices" "strings" "time" "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" "github.com/cilium/ebpf/internal" - "github.com/cilium/ebpf/internal/btf" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" "github.com/cilium/ebpf/internal/unix" ) // ErrNotSupported is returned whenever the kernel doesn't support a feature. var ErrNotSupported = internal.ErrNotSupported -var errUnsatisfiedMap = errors.New("unsatisfied map reference") -var errUnsatisfiedProgram = errors.New("unsatisfied program reference") +// errBadRelocation is returned when the verifier rejects a program due to a +// bad CO-RE relocation. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errBadRelocation = errors.New("bad CO-RE relocation") + +// errUnknownKfunc is returned when the verifier rejects a program due to an +// unknown kfunc. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errUnknownKfunc = errors.New("unknown kfunc") // ProgramID represents the unique ID of an eBPF program. -type ProgramID uint32 +type ProgramID = sys.ProgramID const ( // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. @@ -34,34 +47,82 @@ const ( outputPad = 256 + 2 ) -// DefaultVerifierLogSize is the default number of bytes allocated for the +// minVerifierLogSize is the default number of bytes allocated for the // verifier log. -const DefaultVerifierLogSize = 64 * 1024 +const minVerifierLogSize = 64 * 1024 + +// maxVerifierLogSize is the maximum size of verifier log buffer the kernel +// will accept before returning EINVAL. May be increased to MaxUint32 in the +// future, but avoid the unnecessary EINVAL for now. +const maxVerifierLogSize = math.MaxUint32 >> 2 + +// maxVerifierAttempts is the maximum number of times the verifier will retry +// loading a program with a growing log buffer before giving up. Since we double +// the log size on every attempt, this is the absolute maximum number of +// attempts before the buffer reaches [maxVerifierLogSize]. 
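// For scale: growth starts at minVerifierLogSize (64 KiB) and doubles on each
// retry, so the maxVerifierLogSize cap (just under 1 GiB) is reached after
// about 14 doublings; 30 attempts is therefore a generous upper bound rather
// than a tight one.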
+const maxVerifierAttempts = 30 // ProgramOptions control loading a program into the kernel. type ProgramOptions struct { - // Controls the detail emitted by the kernel verifier. Set to non-zero - // to enable logging. - LogLevel uint32 - // Controls the output buffer size for the verifier. Defaults to - // DefaultVerifierLogSize. - LogSize int - // An ELF containing the target BTF for this program. It is used both to - // find the correct function to trace and to apply CO-RE relocations. + // Bitmap controlling the detail emitted by the kernel's eBPF verifier log. + // LogLevel-type values can be ORed together to request specific kinds of + // verifier output. See the documentation on [ebpf.LogLevel] for details. + // + // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats) + // + // If left to its default value, the program will first be loaded without + // verifier output enabled. Upon error, the program load will be repeated + // with LogLevelBranch and the given (or default) LogSize value. + // + // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier + // log, populating the [ebpf.Program.VerifierLog] field on successful loads + // and including detailed verifier errors if the program is rejected. This + // will always allocate an output buffer, but will result in only a single + // attempt at loading the program. + LogLevel LogLevel + + // Starting size of the verifier log buffer. If the verifier log is larger + // than this size, the buffer will be grown to fit the entire log. Leave at + // its default value unless troubleshooting. + LogSizeStart uint32 + + // Disables the verifier log completely, regardless of other options. + LogDisabled bool + + // Type information used for CO-RE relocations. + // // This is useful in environments where the kernel BTF is not available // (containers) or where it is in a non-standard location. Defaults to - // use the kernel BTF from a well-known location. - TargetBTF io.ReaderAt + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec + + // Additional targets to consider for CO-RE relocations. This can be used to + // pass BTF information for kernel modules when it's not present on + // KernelTypes. + ExtraRelocationTargets []*btf.Spec } // ProgramSpec defines a Program. type ProgramSpec struct { - // Name is passed to the kernel as a debug aid. Must only contain - // alpha numeric and '_' characters. + // Name is passed to the kernel as a debug aid. + // + // Unsupported characters will be stripped. Name string // Type determines at which hook in the kernel a program will run. - Type ProgramType + Type ProgramType + + // Network interface index the user intends to attach this program to after + // loading. Only valid for some program types. + // + // Provides driver-specific context about the target interface to the + // verifier, required when using certain BPF helpers. + Ifindex uint32 + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. AttachType AttachType // Name of a kernel data structure or function to attach to. Its @@ -71,7 +132,7 @@ type ProgramSpec struct { // The program to attach to. Must be provided manually. AttachTarget *Program - // The name of the ELF section this program orininated from. + // The name of the ELF section this program originated from. 
SectionName string Instructions asm.Instructions @@ -92,16 +153,8 @@ type ProgramSpec struct { // detect this value automatically. KernelVersion uint32 - // The BTF associated with this program. Changing Instructions - // will most likely invalidate the contained data, and may - // result in errors when attempting to load it into the kernel. - BTF *btf.Program - // The byte order this program was compiled for, may be nil. ByteOrder binary.ByteOrder - - // Programs called by this ProgramSpec. Includes all dependencies. - references map[string]*ProgramSpec } // Copy returns a copy of the spec. @@ -123,82 +176,31 @@ func (ps *ProgramSpec) Tag() (string, error) { return ps.Instructions.Tag(internal.NativeEndian) } -// flatten returns spec's full instruction stream including all of its -// dependencies and an expanded map of references that includes all symbols -// appearing in the instruction stream. -// -// Returns nil, nil if spec was already visited. -func (spec *ProgramSpec) flatten(visited map[*ProgramSpec]bool) (asm.Instructions, map[string]*ProgramSpec) { - if visited == nil { - visited = make(map[*ProgramSpec]bool) - } - - // This program and its dependencies were already collected. - if visited[spec] { - return nil, nil +// targetsKernelModule returns true if the program supports being attached to a +// symbol provided by a kernel module. +func (ps *ProgramSpec) targetsKernelModule() bool { + if ps.AttachTo == "" { + return false } - visited[spec] = true - - // Start off with spec's direct references and instructions. - progs := spec.references - insns := spec.Instructions - - // Recurse into each reference and append/merge its references into - // a temporary buffer as to not interfere with the resolution process. - for _, ref := range spec.references { - if ri, rp := ref.flatten(visited); ri != nil || rp != nil { - insns = append(insns, ri...) - - // Merge nested references into the top-level scope. - for n, p := range rp { - progs[n] = p - } + switch ps.Type { + case Tracing: + switch ps.AttachType { + case AttachTraceFEntry, AttachTraceFExit: + return true } + case Kprobe: + return true } - return insns, progs -} - -// A reference describes a byte offset an Symbol Instruction pointing -// to another ProgramSpec. -type reference struct { - offset uint64 - spec *ProgramSpec + return false } -// layout returns a unique list of programs that must be included -// in spec's instruction stream when inserting it into the kernel. -// Always returns spec itself as the first entry in the chain. -func (spec *ProgramSpec) layout() ([]reference, error) { - out := []reference{{0, spec}} - - name := spec.Instructions.Name() - - var ins *asm.Instruction - iter := spec.Instructions.Iterate() - for iter.Next() { - ins = iter.Ins - - // Skip non-symbols and symbols that describe the ProgramSpec itself, - // which is usually the first instruction in Instructions. - // ProgramSpec itself is already included and not present in references. - if ins.Symbol == "" || ins.Symbol == name { - continue - } - - // Failure to look up a reference is not an error. There are existing tests - // with valid progs that contain multiple symbols and don't have references - // populated. Assume ProgramSpec is used similarly in the wild, so don't - // alter this behaviour. 
- ref := spec.references[ins.Symbol] - if ref != nil { - out = append(out, reference{iter.Offset.Bytes(), ref}) - } - } - - return out, nil -} +// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a +// program is rejected by the verifier. +// +// Use [errors.As] to access the error. +type VerifierError = internal.VerifierError // Program represents BPF program loaded into the kernel. // @@ -216,8 +218,10 @@ type Program struct { // NewProgram creates a new Program. // -// Loading a program for the first time will perform -// feature detection by loading small, temporary programs. +// See [NewProgramWithOptions] for details. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. func NewProgram(spec *ProgramSpec) (*Program, error) { return NewProgramWithOptions(spec, ProgramOptions{}) } @@ -226,22 +230,31 @@ func NewProgram(spec *ProgramSpec) (*Program, error) { // // Loading a program for the first time will perform // feature detection by loading small, temporary programs. +// +// Returns a [VerifierError] containing the full verifier log if the program is +// rejected by the kernel. func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) { if spec == nil { return nil, errors.New("can't load a program from a nil spec") } - handles := newHandleCache() - defer handles.close() - - prog, err := newProgramWithOptions(spec, opts, handles) - if errors.Is(err, errUnsatisfiedMap) { + prog, err := newProgramWithOptions(spec, opts, btf.NewCache()) + if errors.Is(err, asm.ErrUnsatisfiedMapReference) { return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err) } return prog, err } -func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *handleCache) (*Program, error) { +var ( + coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel)) + // This log message was introduced by ebb676daa1a3 ("bpf: Print function name in + // addition to function id") which first appeared in v4.10 and has remained + // unchanged since. + coreBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel)) + kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase)) +) + +func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, c *btf.Cache) (*Program, error) { if len(spec.Instructions) == 0 { return nil, errors.New("instructions cannot be empty") } @@ -260,81 +273,94 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand // Overwrite Kprobe program version if set to zero or the magic version constant. 
kv := spec.KernelVersion if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { - v, err := internal.KernelVersion() + v, err := linux.KernelVersion() if err != nil { return nil, fmt.Errorf("detecting kernel version: %w", err) } kv = v.Kernel() } + p, progType := platform.DecodeConstant(spec.Type) + if p != platform.Native { + return nil, fmt.Errorf("program type %s (%s): %w", spec.Type, p, internal.ErrNotSupportedOnOS) + } + attr := &sys.ProgLoadAttr{ - ProgType: sys.ProgType(spec.Type), + ProgName: maybeFillObjName(spec.Name), + ProgType: sys.ProgType(progType), ProgFlags: spec.Flags, + ProgIfindex: spec.Ifindex, ExpectedAttachType: sys.AttachType(spec.AttachType), License: sys.NewStringPointer(spec.License), KernVersion: kv, } - if haveObjName() == nil { - attr.ProgName = sys.NewObjName(spec.Name) - } + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) - var err error - var targetBTF *btf.Spec - if opts.TargetBTF != nil { - targetBTF, err = handles.btfSpec(opts.TargetBTF) - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } + var b btf.Builder + if err := applyRelocations(insns, spec.ByteOrder, &b, c, opts.KernelTypes, opts.ExtraRelocationTargets); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) } - layout, err := spec.layout() - if err != nil { - return nil, fmt.Errorf("get program layout: %w", err) + errExtInfos := haveProgramExtInfos() + if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) { + // There is at least one CO-RE relocation which relies on a stable local + // type ID. + // Return ErrNotSupported instead of E2BIG if there is no BTF support. + return nil, errExtInfos } - var btfDisabled bool - var core btf.COREFixups - if spec.BTF != nil { - core, err = spec.BTF.Fixups(targetBTF) + if errExtInfos == nil { + // Only add func and line info if the kernel supports it. This allows + // BPF compiled with modern toolchains to work on old kernels. 
+ fib, lib, err := btf.MarshalExtInfos(insns, &b) if err != nil { - return nil, fmt.Errorf("CO-RE relocations: %w", err) + return nil, fmt.Errorf("marshal ext_infos: %w", err) } - handle, err := handles.btfHandle(spec.BTF.Spec()) - btfDisabled = errors.Is(err, btf.ErrNotSupported) - if err != nil && !btfDisabled { + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.SlicePointer(fib) + + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = sys.SlicePointer(lib) + } + + if !b.Empty() { + handle, err := btf.NewHandle(&b) + if err != nil { return nil, fmt.Errorf("load BTF: %w", err) } + defer handle.Close() - if handle != nil { - attr.ProgBtfFd = uint32(handle.FD()) + attr.ProgBtfFd = uint32(handle.FD()) + } - fib, err := marshalFuncInfos(layout) - if err != nil { - return nil, err - } - attr.FuncInfoRecSize = uint32(binary.Size(btf.FuncInfo{})) - attr.FuncInfoCnt = uint32(len(fib)) / attr.FuncInfoRecSize - attr.FuncInfo = sys.NewSlicePointer(fib) + kconfig, err := resolveKconfigReferences(insns) + if err != nil { + return nil, fmt.Errorf("resolve .kconfig: %w", err) + } + defer kconfig.Close() - lib, err := marshalLineInfos(layout) - if err != nil { - return nil, err - } - attr.LineInfoRecSize = uint32(binary.Size(btf.LineInfo{})) - attr.LineInfoCnt = uint32(len(lib)) / attr.LineInfoRecSize - attr.LineInfo = sys.NewSlicePointer(lib) - } + if err := resolveKsymReferences(insns); err != nil { + return nil, fmt.Errorf("resolve .ksyms: %w", err) } - insns, err := core.Apply(spec.Instructions) + if err := fixupAndValidate(insns); err != nil { + return nil, err + } + + handles, err := fixupKfuncs(insns, c) if err != nil { - return nil, fmt.Errorf("CO-RE fixup: %w", err) + return nil, fmt.Errorf("fixing up kfuncs: %w", err) } + defer handles.Close() - if err := fixupJumpsAndCalls(insns); err != nil { - return nil, err + if len(handles) > 0 { + fdArray := handles.fdArray() + attr.FdArray = sys.SlicePointer(fdArray) } buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) @@ -344,94 +370,209 @@ func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions, handles *hand } bytecode := buf.Bytes() - attr.Insns = sys.NewSlicePointer(bytecode) + attr.Insns = sys.SlicePointer(bytecode) attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) - if spec.AttachTo != "" { - if spec.AttachTarget != nil { - info, err := spec.AttachTarget.Info() - if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) - } + if spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil { + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } - btfID, ok := info.BTFID() - if !ok { - return nil, fmt.Errorf("load target BTF: no BTF info available") + attr.AttachBtfId = targetID + attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + var targetMember string + attachTo := spec.AttachTo + + if spec.Type == StructOps { + attachTo, targetMember, _ = strings.Cut(attachTo, ":") + if targetMember == "" { + return nil, fmt.Errorf("struct_ops: AttachTo must be ':' (got %s)", spec.AttachTo) } - btfHandle, err := btf.NewHandleFromID(btfID) + } + + module, targetID, err := findProgramTargetInKernel(attachTo, spec.Type, spec.AttachType, c) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We 
ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + if spec.Type == StructOps { + var s *btf.Spec + + target := btf.Type((*btf.Struct)(nil)) + s, module, err = findTargetInKernel(attachTo, &target, c) if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) + return nil, fmt.Errorf("lookup struct_ops kern type %q: %w", attachTo, err) } - defer btfHandle.Close() + kType := target.(*btf.Struct) - targetBTF = btfHandle.Spec() + targetID, err = s.TypeID(kType) if err != nil { - return nil, fmt.Errorf("load target BTF: %w", err) + return nil, fmt.Errorf("type id for %s: %w", kType.TypeName(), err) } - } - target, err := resolveBTFType(targetBTF, spec.AttachTo, spec.Type, spec.AttachType) - if err != nil { - return nil, err - } - if target != nil { - attr.AttachBtfId = uint32(target.ID()) + idx := slices.IndexFunc(kType.Members, func(m btf.Member) bool { + return m.Name == targetMember + }) + if idx < 0 { + return nil, fmt.Errorf("member %q not found in %s", targetMember, kType.Name) + } + + // ExpectedAttachType: index of the target member in the struct + attr.ExpectedAttachType = sys.AttachType(idx) } - if spec.AttachTarget != nil { - attr.AttachProgFd = uint32(spec.AttachTarget.FD()) + + attr.AttachBtfId = targetID + if module != nil && attr.AttachBtfObjFd == 0 { + attr.AttachBtfObjFd = uint32(module.FD()) + defer module.Close() } } - logSize := DefaultVerifierLogSize - if opts.LogSize > 0 { - logSize = opts.LogSize + if platform.IsWindows && opts.LogLevel != 0 { + return nil, fmt.Errorf("log level: %w", internal.ErrNotSupportedOnOS) } var logBuf []byte - if opts.LogLevel > 0 { - logBuf = make([]byte, logSize) - attr.LogLevel = opts.LogLevel - attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = sys.NewSlicePointer(logBuf) + var fd *sys.FD + if opts.LogDisabled { + // Loading with logging disabled should never retry. + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{"", fd, spec.Name, "", spec.Type}, nil + } + } else { + // Only specify log size if log level is also specified. Setting size + // without level results in EINVAL. Level will be bumped to LogLevelBranch + // if the first load fails. + if opts.LogLevel != 0 { + attr.LogLevel = opts.LogLevel + attr.LogSize = internal.Between(opts.LogSizeStart, minVerifierLogSize, maxVerifierLogSize) + } + + attempts := 1 + for { + if attr.LogLevel != 0 { + logBuf = make([]byte, attr.LogSize) + attr.LogBuf = sys.SlicePointer(logBuf) + } + + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + } + + if !retryLogAttrs(attr, opts.LogSizeStart, err) { + break + } + + if attempts >= maxVerifierAttempts { + return nil, fmt.Errorf("load program: %w (bug: hit %d verifier attempts)", err, maxVerifierAttempts) + } + attempts++ + } } - fd, err := sys.ProgLoad(attr) - if err == nil { - return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + end := bytes.IndexByte(logBuf, 0) + if end < 0 { + end = len(logBuf) } - logErr := err - if opts.LogLevel == 0 && opts.LogSize >= 0 { - // Re-run with the verifier enabled to get better error messages. 
- logBuf = make([]byte, logSize) - attr.LogLevel = 1 - attr.LogSize = uint32(len(logBuf)) - attr.LogBuf = sys.NewSlicePointer(logBuf) + tail := logBuf[max(end-256, 0):end] + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + + case errors.Is(err, unix.EFAULT): + // EFAULT is returned when the kernel hits a verifier bug, and always + // overrides ENOSPC, defeating the buffer growth strategy. Warn the user + // that they may need to increase the buffer size manually. + return nil, fmt.Errorf("load program: %w (hit verifier bug, increase LogSizeStart to fit the log and check dmesg)", err) - fd, logErr = sys.ProgLoad(attr) - if logErr == nil { - fd.Close() + case errors.Is(err, unix.EINVAL): + if bytes.Contains(tail, coreBadCall) { + err = errBadRelocation + break + } else if bytes.Contains(tail, kfuncBadCall) { + err = errUnknownKfunc + break + } + + case errors.Is(err, unix.EACCES): + if bytes.Contains(tail, coreBadLoad) { + err = errBadRelocation + break } } - if errors.Is(logErr, unix.EPERM) && len(logBuf) > 0 && logBuf[0] == 0 { - // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can - // check that the log is empty to reduce false positives. - return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", logErr) + // hasFunctionReferences may be expensive, so check it last. + if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) && + hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } } - err = internal.ErrorWithLog(err, logBuf, logErr) - if btfDisabled { - return nil, fmt.Errorf("load program without BTF: %w", err) + return nil, internal.ErrorWithLog("load program", err, logBuf) +} + +func retryLogAttrs(attr *sys.ProgLoadAttr, startSize uint32, err error) bool { + if attr.LogSize == maxVerifierLogSize { + // Maximum buffer size reached, don't grow or retry. + return false } - return nil, fmt.Errorf("load program: %w", err) + + // ENOSPC means the log was enabled on the previous iteration, so we only + // need to grow the buffer. + if errors.Is(err, unix.ENOSPC) { + if attr.LogTrueSize != 0 { + // Kernel supports LogTrueSize and previous iteration undershot the buffer + // size. Try again with the given true size. + attr.LogSize = attr.LogTrueSize + return true + } + + // Ensure the size doesn't overflow. + const factor = 2 + if attr.LogSize >= maxVerifierLogSize/factor { + attr.LogSize = maxVerifierLogSize + return true + } + + // Make an educated guess how large the buffer should be by multiplying. Due + // to int division, this rounds down odd sizes. + attr.LogSize = internal.Between(attr.LogSize, minVerifierLogSize, maxVerifierLogSize/factor) + attr.LogSize *= factor + + return true + } + + if attr.LogLevel == 0 { + // Loading the program failed, it wasn't a buffer-related error, and the log + // was disabled the previous iteration. Enable basic logging and retry. + attr.LogLevel = LogLevelBranch + attr.LogSize = internal.Between(startSize, minVerifierLogSize, maxVerifierLogSize) + return true + } + + // Loading the program failed for a reason other than buffer size and the log + // was already enabled the previous iteration. Don't retry. 
+ return false } -// NewProgramFromFD creates a program from a raw fd. +// NewProgramFromFD creates a [Program] around a raw fd. // // You should not use fd after calling this function. // -// Requires at least Linux 4.10. +// Requires at least Linux 4.13. Returns an error on Windows. func NewProgramFromFD(fd int) (*Program, error) { f, err := sys.NewFD(fd) if err != nil { @@ -441,9 +582,10 @@ func NewProgramFromFD(fd int) (*Program, error) { return newProgramFromFD(f) } -// NewProgramFromID returns the program for a given id. +// NewProgramFromID returns the [Program] for a given program id. Returns +// [ErrNotExist] if there is no eBPF program with the given id. // -// Returns ErrNotExist, if there is no eBPF program with the given id. +// Requires at least Linux 4.13. func NewProgramFromID(id ProgramID) (*Program, error) { fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ Id: uint32(id), @@ -456,13 +598,13 @@ func NewProgramFromID(id ProgramID) (*Program, error) { } func newProgramFromFD(fd *sys.FD) (*Program, error) { - info, err := newProgramInfoFromFd(fd) + info, err := minimalProgramInfoFromFd(fd) if err != nil { fd.Close() return nil, fmt.Errorf("discover program type: %w", err) } - return &Program{"", fd, "", "", info.Type}, nil + return &Program{"", fd, info.Name, "", info.Type}, nil } func (p *Program) String() string { @@ -484,6 +626,32 @@ func (p *Program) Info() (*ProgramInfo, error) { return newProgramInfoFromFd(p.fd) } +// Stats returns runtime statistics about the Program. Requires BPF statistics +// collection to be enabled, see [EnableStats]. +// +// Requires at least Linux 5.8. +func (p *Program) Stats() (*ProgramStats, error) { + return newProgramStatsFromFd(p.fd) +} + +// Handle returns a reference to the program's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. +func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + // FD gets the file descriptor of the Program. // // It is invalid to call this function after Close has been called. @@ -515,9 +683,10 @@ func (p *Program) Clone() (*Program, error) { // Calling Pin on a previously pinned program will overwrite the path, except when // the new path already exists. Re-pinning across filesystems is not supported. // -// This requires bpffs to be mounted above fileName. See https://docs.cilium.io/en/k8s-doc/admin/#admin-mount-bpffs +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd func (p *Program) Pin(fileName string) error { - if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil { + if err := sys.Pin(p.pinnedPath, fileName, p.fd); err != nil { return err } p.pinnedPath = fileName @@ -530,7 +699,7 @@ func (p *Program) Pin(fileName string) error { // // Unpinning an unpinned Program returns nil. func (p *Program) Unpin() error { - if err := internal.Unpin(p.pinnedPath); err != nil { + if err := sys.Unpin(p.pinnedPath); err != nil { return err } p.pinnedPath = "" @@ -542,7 +711,9 @@ func (p *Program) IsPinned() bool { return p.pinnedPath != "" } -// Close unloads the program from the kernel. 
+// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. func (p *Program) Close() error { if p == nil { return nil @@ -551,19 +722,80 @@ func (p *Program) Close() error { return p.fd.Close() } +// Various options for Run'ing a Program +type RunOptions struct { + // Program's data input. Required field. + // + // The kernel expects at least 14 bytes input for an ethernet header for + // XDP and SKB programs. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. + ContextOut interface{} + // Minimum number of times to run Program. Optional field. Defaults to 1. + // + // The program may be executed more often than this due to interruptions, e.g. + // when runtime.AllThreadsSyscall is invoked. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + // + // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead. + Reset func() +} + // Test runs the Program in the kernel with the given input and returns the -// value returned by the eBPF program. outLen may be zero. +// value returned by the eBPF program. // // Note: the kernel expects at least 14 bytes input for an ethernet header for // XDP and SKB programs. // // This function requires at least Linux 4.12. func (p *Program) Test(in []byte) (uint32, []byte, error) { - ret, out, _, err := p.testRun(in, 1, nil) + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.run(&opts) if err != nil { - return ret, nil, fmt.Errorf("can't test program: %w", err) + return ret, nil, fmt.Errorf("test program: %w", err) } - return ret, out, nil + return ret, opts.DataOut, nil +} + +// Run runs the Program in kernel with given RunOptions. +// +// Note: the same restrictions from Test apply. +func (p *Program) Run(opts *RunOptions) (uint32, error) { + if opts == nil { + opts = &RunOptions{} + } + + ret, _, err := p.run(opts) + if err != nil { + return ret, fmt.Errorf("run program: %w", err) + } + return ret, nil } // Benchmark runs the Program with the given input for a number of times @@ -573,20 +805,32 @@ func (p *Program) Test(in []byte) (uint32, []byte, error) { // run or an error. reset is called whenever the benchmark syscall is // interrupted, and should be set to testing.B.ResetTimer or similar. // -// Note: profiling a call to this function will skew it's results, see -// https://github.com/cilium/ebpf/issues/24 -// // This function requires at least Linux 4.12. 
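// A minimal caller-side sketch of Run with explicit RunOptions (hypothetical
// helper; the program handle and packet are supplied by the caller). It mirrors
// what Test does above, but keeps control over the output buffer:
func exampleRunOnce(p *Program, pkt []byte) (uint32, []byte, error) {
	opts := RunOptions{
		Data:    pkt,                              // at least 14 bytes for XDP and SKB programs
		DataOut: make([]byte, len(pkt)+outputPad), // room for helpers that grow the packet
		Repeat:  1,
	}
	ret, err := p.Run(&opts)
	return ret, opts.DataOut, err
}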
func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { - ret, _, total, err := p.testRun(in, repeat, reset) + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.run(&opts) if err != nil { - return ret, total, fmt.Errorf("can't benchmark program: %w", err) + return ret, total, fmt.Errorf("benchmark program: %w", err) } return ret, total, nil } -var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() error { +var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", func() error { + if platform.IsWindows { + return nil + } + prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels. Type: SocketFilter, Instructions: asm.Instructions{ asm.LoadImm(asm.R0, 0, asm.DWord), @@ -600,99 +844,158 @@ var haveProgTestRun = internal.FeatureTest("BPF_PROG_TEST_RUN", "4.12", func() e } defer prog.Close() - // Programs require at least 14 bytes input - in := make([]byte, 14) + in := internal.EmptyBPFContext attr := sys.ProgRunAttr{ ProgFd: uint32(prog.FD()), DataSizeIn: uint32(len(in)), - DataIn: sys.NewSlicePointer(in), + DataIn: sys.SlicePointer(in), } err = sys.ProgRun(&attr) - if errors.Is(err, unix.EINVAL) { + switch { + case errors.Is(err, unix.EINVAL): // Check for EINVAL specifically, rather than err != nil since we // otherwise misdetect due to insufficient permissions. return internal.ErrNotSupported - } - if errors.Is(err, unix.EINTR) { + + case errors.Is(err, unix.EINTR): // We know that PROG_TEST_RUN is supported if we get EINTR. return nil + + case errors.Is(err, sys.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. + return nil } + return err -}) +}, "4.12", "windows:0.20") -func (p *Program) testRun(in []byte, repeat int, reset func()) (uint32, []byte, time.Duration, error) { - if uint(repeat) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("repeat is too high") +func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") } - if len(in) == 0 { - return 0, nil, 0, fmt.Errorf("missing input") + if err := haveProgRun(); err != nil { + return 0, 0, err } - if uint(len(in)) > math.MaxUint32 { - return 0, nil, 0, fmt.Errorf("input is too long") + var ctxIn []byte + if opts.Context != nil { + var err error + ctxIn, err = binary.Append(nil, internal.NativeEndian, opts.Context) + if err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } } - if err := haveProgTestRun(); err != nil { - return 0, nil, 0, err + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) + } else if platform.IsWindows && len(ctxIn) > 0 { + // Windows rejects a non-zero ctxIn with a nil ctxOut. + ctxOut = make([]byte, len(ctxIn)) } - // Older kernels ignore the dataSizeOut argument when copying to user space. - // Combined with things like bpf_xdp_adjust_head() we don't really know what the final - // size will be. Hence we allocate an output buffer which we hope will always be large - // enough, and panic if the kernel wrote past the end of the allocation. 
- // See https://patchwork.ozlabs.org/cover/1006822/ - out := make([]byte, len(in)+outputPad) - attr := sys.ProgRunAttr{ ProgFd: p.fd.Uint(), - DataSizeIn: uint32(len(in)), - DataSizeOut: uint32(len(out)), - DataIn: sys.NewSlicePointer(in), - DataOut: sys.NewSlicePointer(out), - Repeat: uint32(repeat), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.SlicePointer(opts.Data), + DataOut: sys.SlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxIn)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.SlicePointer(ctxIn), + CtxOut: sys.SlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, + } + + if p.Type() == Syscall && ctxIn != nil && ctxOut != nil { + // Linux syscall program errors on non-nil ctxOut, uses ctxIn + // for both input and output. Shield the user from this wart. + if len(ctxIn) != len(ctxOut) { + return 0, 0, errors.New("length mismatch: Context and ContextOut") + } + attr.CtxOut, attr.CtxSizeOut = sys.TypedPointer[uint8]{}, 0 + ctxOut = ctxIn } +retry: for { err := sys.ProgRun(&attr) if err == nil { - break + break retry } if errors.Is(err, unix.EINTR) { - if reset != nil { - reset() + if attr.Repeat <= 1 { + // Older kernels check whether enough repetitions have been + // executed only after checking for pending signals. + // + // run signal? done? run ... + // + // As a result we can get EINTR for repeat==1 even though + // the program was run exactly once. Treat this as a + // successful run instead. + // + // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code") + // the conditions are reversed: + // run done? signal? ... + break retry } - continue + + if opts.Reset != nil { + opts.Reset() + } + continue retry } - return 0, nil, 0, fmt.Errorf("can't run test: %w", err) + if errors.Is(err, sys.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, err } - if int(attr.DataSizeOut) > cap(out) { - // Houston, we have a problem. The program created more data than we allocated, - // and the kernel wrote past the end of our buffer. - panic("kernel wrote past end of output buffer") + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] + } + + if opts.ContextOut != nil { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } } - out = out[:int(attr.DataSizeOut)] total := time.Duration(attr.Duration) * time.Nanosecond - return attr.Retval, out, total, nil + return attr.Retval, total, nil } -func unmarshalProgram(buf []byte) (*Program, error) { - if len(buf) != 4 { - return nil, errors.New("program id requires 4 byte value") +func unmarshalProgram(buf sysenc.Buffer) (*Program, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err } // Looking up an entry in a nested map or prog array returns an id, // not an fd. 
- id := internal.NativeEndian.Uint32(buf) return NewProgramFromID(ProgramID(id)) } func marshalProgram(p *Program, length int) ([]byte, error) { + if p == nil { + return nil, errors.New("can't marshal a nil Program") + } + if length != 4 { return nil, fmt.Errorf("can't marshal program to %d bytes", length) } @@ -702,50 +1005,12 @@ func marshalProgram(p *Program, length int) ([]byte, error) { return buf, nil } -// Attach a Program. -// -// Deprecated: use link.RawAttachProgram instead. -func (p *Program) Attach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - attr := sys.ProgAttachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: p.fd.Uint(), - AttachType: uint32(typ), - AttachFlags: uint32(flags), - } - - return sys.ProgAttach(&attr) -} - -// Detach a Program. -// -// Deprecated: use link.RawDetachProgram instead. -func (p *Program) Detach(fd int, typ AttachType, flags AttachFlags) error { - if fd < 0 { - return errors.New("invalid fd") - } - - if flags != 0 { - return errors.New("flags must be zero") - } - - attr := sys.ProgAttachAttr{ - TargetFd: uint32(fd), - AttachBpfFd: p.fd.Uint(), - AttachType: uint32(typ), - } - - return sys.ProgAttach(&attr) -} - -// LoadPinnedProgram loads a Program from a BPF file. +// LoadPinnedProgram loads a Program from a pin (file) on the BPF virtual +// filesystem. // // Requires at least Linux 4.11. func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { - fd, err := sys.ObjGet(&sys.ObjGetAttr{ + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ Pathname: sys.NewStringPointer(fileName), FileFlags: opts.Marshal(), }) @@ -753,29 +1018,21 @@ func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) return nil, err } - info, err := newProgramInfoFromFd(fd) - if err != nil { + if typ != sys.BPF_TYPE_PROG { _ = fd.Close() - return nil, fmt.Errorf("info for %s: %w", fileName, err) + return nil, fmt.Errorf("%s is not a Program", fileName) } - return &Program{"", fd, filepath.Base(fileName), fileName, info.Type}, nil -} + p, err := newProgramFromFD(fd) + if err == nil { + p.pinnedPath = fileName -// SanitizeName replaces all invalid characters in name with replacement. -// Passing a negative value for replacement will delete characters instead -// of replacing them. Use this to automatically generate valid names for maps -// and programs at runtime. -// -// The set of allowed characters depends on the running kernel version. -// Dots are only allowed as of kernel 5.2. -func SanitizeName(name string, replacement rune) string { - return strings.Map(func(char rune) rune { - if invalidBPFObjNameChar(char) { - return replacement + if haveObjName() != nil { + p.name = filepath.Base(fileName) } - return char - }, name) + } + + return p, err } // ProgramGetNextID returns the ID of the next eBPF program. @@ -786,17 +1043,6 @@ func ProgramGetNextID(startID ProgramID) (ProgramID, error) { return ProgramID(attr.NextId), sys.ProgGetNextId(attr) } -// ID returns the systemwide unique ID of the program. -// -// Deprecated: use ProgramInfo.ID() instead. -func (p *Program) ID() (ProgramID, error) { - var info sys.ProgInfo - if err := sys.ObjInfo(p.fd, &info); err != nil { - return ProgramID(0), err - } - return ProgramID(info.Id), nil -} - // BindMap binds map to the program and is only released once program is released. 
// // This may be used in cases where metadata should be associated with the program @@ -810,7 +1056,19 @@ func (p *Program) BindMap(m *Map) error { return sys.ProgBindMap(attr) } -func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachType AttachType) (btf.Type, error) { +var errUnrecognizedAttachType = errors.New("unrecognized attach type") + +// find an attach target type in the kernel. +// +// name, progType and attachType determine which type we need to attach to. +// +// The attach target may be in a loaded kernel module. +// In that case the returned handle will be non-nil. +// The caller is responsible for closing the handle. +// +// Returns errUnrecognizedAttachType if the combination of progType and attachType +// is not recognised. +func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType, cache *btf.Cache) (*btf.Handle, btf.TypeID, error) { type match struct { p ProgramType a AttachType @@ -818,65 +1076,169 @@ func resolveBTFType(spec *btf.Spec, name string, progType ProgramType, attachTyp var ( typeName, featureName string - isBTFTypeFunc = true + target btf.Type ) switch (match{progType, attachType}) { + case match{StructOps, AttachStructOps}: + typeName = name + featureName = "struct_ops " + name + target = (*btf.Struct)(nil) case match{LSM, AttachLSMMac}: typeName = "bpf_lsm_" + name featureName = name + " LSM hook" + target = (*btf.Func)(nil) case match{Tracing, AttachTraceIter}: typeName = "bpf_iter_" + name featureName = name + " iterator" - case match{Extension, AttachNone}: - typeName = name - featureName = fmt.Sprintf("freplace %s", name) + target = (*btf.Func)(nil) case match{Tracing, AttachTraceFEntry}: typeName = name featureName = fmt.Sprintf("fentry %s", name) + target = (*btf.Func)(nil) case match{Tracing, AttachTraceFExit}: typeName = name featureName = fmt.Sprintf("fexit %s", name) + target = (*btf.Func)(nil) case match{Tracing, AttachModifyReturn}: typeName = name featureName = fmt.Sprintf("fmod_ret %s", name) + target = (*btf.Func)(nil) case match{Tracing, AttachTraceRawTp}: typeName = fmt.Sprintf("btf_trace_%s", name) featureName = fmt.Sprintf("raw_tp %s", name) - isBTFTypeFunc = false + target = (*btf.Typedef)(nil) default: - return nil, nil + return nil, 0, errUnrecognizedAttachType } - var ( - target btf.Type - err error - ) - if spec == nil { - spec, err = btf.LoadKernelSpec() - if err != nil { - return nil, fmt.Errorf("load kernel spec: %w", err) - } + spec, module, err := findTargetInKernel(typeName, &target, cache) + if errors.Is(err, btf.ErrNotFound) { + return nil, 0, &internal.UnsupportedFeatureError{Name: featureName} + } + // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel + // symbols, we should explicitly refuse program loads. They will not reliably + // do what the caller intended. + if errors.Is(err, btf.ErrMultipleMatches) { + return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err) + } + if err != nil { + return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err) } - if isBTFTypeFunc { - var targetFunc *btf.Func - err = spec.TypeByName(typeName, &targetFunc) - target = targetFunc - } else { - var targetTypedef *btf.Typedef - err = spec.TypeByName(typeName, &targetTypedef) - target = targetTypedef + id, err := spec.TypeID(target) + if err != nil { + module.Close() + return nil, 0, err } + return module, id, nil +} + +// findTargetInKernel attempts to find a named type in the current kernel. 
+// +// target will point at the found type after a successful call. Searches both +// vmlinux and any loaded modules. +// +// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound +// if the type wasn't found at all. +func findTargetInKernel(typeName string, target *btf.Type, cache *btf.Cache) (*btf.Spec, *btf.Handle, error) { + kernelSpec, err := cache.Kernel() + if err != nil { + return nil, nil, err + } + + err = kernelSpec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + spec, module, err := findTargetInModule(typeName, target, cache) + if err != nil { + return nil, nil, fmt.Errorf("find target in modules: %w", err) + } + return spec, module, nil + } if err != nil { + return nil, nil, fmt.Errorf("find target in vmlinux: %w", err) + } + return kernelSpec, nil, err +} + +// findTargetInModule attempts to find a named type in any loaded module. +// +// base must contain the kernel's types and is used to parse kmod BTF. Modules +// are searched in the order they were loaded. +// +// Returns btf.ErrNotFound if the target can't be found in any module. +func findTargetInModule(typeName string, target *btf.Type, cache *btf.Cache) (*btf.Spec, *btf.Handle, error) { + it := new(btf.HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err) + } + + if !info.IsModule() { + continue + } + + spec, err := cache.Module(info.Name) + if err != nil { + return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err) + } + + err = spec.TypeByName(typeName, target) if errors.Is(err, btf.ErrNotFound) { - return nil, &internal.UnsupportedFeatureError{ - Name: featureName, - } + continue } - return nil, fmt.Errorf("resolve BTF for %s: %w", featureName, err) + if err != nil { + return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err) + } + + return spec, it.Take(), nil + } + if err := it.Err(); err != nil { + return nil, nil, fmt.Errorf("iterate modules: %w", err) + } + + return nil, nil, btf.ErrNotFound +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. +func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}, + match{Tracing, AttachTraceFEntry}, + match{Tracing, AttachTraceFExit}: + typeName = name + default: + return 0, errUnrecognizedAttachType + } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return 0, err + } + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + if err != nil { + return 0, fmt.Errorf("find target %s: %w", typeName, err) } - return target, nil + return spec.TypeID(targetFunc) } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/run-tests.sh b/src/nvcgo/vendor/github.com/cilium/ebpf/run-tests.sh deleted file mode 100644 index 472bc4f1a..000000000 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/run-tests.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/bash -# Test the current package under a different kernel. -# Requires virtme and qemu to be installed. 
-# Examples: -# Run all tests on a 5.4 kernel -# $ ./run-tests.sh 5.4 -# Run a subset of tests: -# $ ./run-tests.sh 5.4 ./link - -set -euo pipefail - -script="$(realpath "$0")" -readonly script - -# This script is a bit like a Matryoshka doll since it keeps re-executing itself -# in various different contexts: -# -# 1. invoked by the user like run-tests.sh 5.4 -# 2. invoked by go test like run-tests.sh --exec-vm -# 3. invoked by init in the vm like run-tests.sh --exec-test -# -# This allows us to use all available CPU on the host machine to compile our -# code, and then only use the VM to execute the test. This is because the VM -# is usually slower at compiling than the host. -if [[ "${1:-}" = "--exec-vm" ]]; then - shift - - input="$1" - shift - - # Use sudo if /dev/kvm isn't accessible by the current user. - sudo="" - if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then - sudo="sudo" - fi - readonly sudo - - testdir="$(dirname "$1")" - output="$(mktemp -d)" - printf -v cmd "%q " "$@" - - if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then - # stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a - # blocking substitute. - mkfifo "${output}/fake-stdin" - # Open for reading and writing to avoid blocking. - exec 0<> "${output}/fake-stdin" - rm "${output}/fake-stdin" - fi - - if ! $sudo virtme-run --kimg "${input}/bzImage" --memory 768M --pwd \ - --rwdir="${testdir}=${testdir}" \ - --rodir=/run/input="${input}" \ - --rwdir=/run/output="${output}" \ - --script-sh "PATH=\"$PATH\" CI_MAX_KERNEL_VERSION="${CI_MAX_KERNEL_VERSION:-}" \"$script\" --exec-test $cmd" \ - --kopt possible_cpus=2; then # need at least two CPUs for some tests - exit 23 - fi - - if [[ ! -e "${output}/success" ]]; then - exit 42 - fi - - $sudo rm -r "$output" - exit 0 -elif [[ "${1:-}" = "--exec-test" ]]; then - shift - - mount -t bpf bpf /sys/fs/bpf - mount -t tracefs tracefs /sys/kernel/debug/tracing - - if [[ -d "/run/input/bpf" ]]; then - export KERNEL_SELFTESTS="/run/input/bpf" - fi - - dmesg -C - if ! "$@"; then - dmesg - exit 1 # this return code is "swallowed" by qemu - fi - touch "/run/output/success" - exit 0 -fi - -readonly kernel_version="${1:-}" -if [[ -z "${kernel_version}" ]]; then - echo "Expecting kernel version as first argument" - exit 1 -fi -shift - -readonly kernel="linux-${kernel_version}.bz" -readonly selftests="linux-${kernel_version}-selftests-bpf.tgz" -readonly input="$(mktemp -d)" -readonly tmp_dir="${TMPDIR:-/tmp}" -readonly branch="${BRANCH:-master}" - -fetch() { - echo Fetching "${1}" - pushd "${tmp_dir}" > /dev/null - curl -s -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${branch}/${1}" - local ret=$? - popd > /dev/null - return $ret -} - -fetch "${kernel}" -cp "${tmp_dir}/${kernel}" "${input}/bzImage" - -if fetch "${selftests}"; then - echo "Decompressing selftests" - mkdir "${input}/bpf" - tar --strip-components=4 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf" -else - echo "No selftests found, disabling" -fi - -args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...) -if (( $# > 0 )); then - args=("$@") -fi - -export GOFLAGS=-mod=readonly -export CGO_ENABLED=0 -# LINUX_VERSION_CODE test compares this to discovered value. 
-export KERNEL_VERSION="${kernel_version}" - -echo Testing on "${kernel_version}" -go test -exec "$script --exec-vm $input" "${args[@]}" -echo "Test successful on ${kernel_version}" - -rm -r "${input}" diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/staticcheck.conf b/src/nvcgo/vendor/github.com/cilium/ebpf/staticcheck.conf new file mode 100644 index 000000000..cfc907da3 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/staticcheck.conf @@ -0,0 +1,3 @@ +# Default configuration from https://staticcheck.dev/docs/configuration with +# SA4003 disabled. Remove when https://github.com/cilium/ebpf/issues/1876 is fixed. +checks = ["all", "-SA9003", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023", "-SA4003"] diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/struct_ops.go b/src/nvcgo/vendor/github.com/cilium/ebpf/struct_ops.go new file mode 100644 index 000000000..162f344ea --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/struct_ops.go @@ -0,0 +1,139 @@ +package ebpf + +import ( + "fmt" + "reflect" + "strings" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +const structOpsValuePrefix = "bpf_struct_ops_" + +// structOpsFindInnerType returns the "inner" struct inside a value struct_ops type. +// +// Given a value like: +// +// struct bpf_struct_ops_bpf_testmod_ops { +// struct bpf_struct_ops_common common; +// struct bpf_testmod_ops data; +// }; +// +// this function returns the *btf.Struct for "bpf_testmod_ops" along with the +// byte offset of the "data" member inside the value type. +// +// The inner struct name is derived by trimming the "bpf_struct_ops_" prefix +// from the value's name. +func structOpsFindInnerType(vType *btf.Struct) (*btf.Struct, uint32, error) { + innerName := strings.TrimPrefix(vType.Name, structOpsValuePrefix) + + for _, m := range vType.Members { + if st, ok := btf.As[*btf.Struct](m.Type); ok && st.Name == innerName { + return st, m.Offset.Bytes(), nil + } + } + + return nil, 0, fmt.Errorf("inner struct %q not found in %s", innerName, vType.Name) +} + +// structOpsFindTarget resolves the kernel-side "value struct" for a struct_ops map. +func structOpsFindTarget(userType *btf.Struct, cache *btf.Cache) (vType *btf.Struct, id btf.TypeID, module *btf.Handle, err error) { + // the kernel value type name, e.g. "bpf_struct_ops_" + vTypeName := structOpsValuePrefix + userType.Name + + target := btf.Type((*btf.Struct)(nil)) + spec, module, err := findTargetInKernel(vTypeName, &target, cache) + if err != nil { + return nil, 0, nil, fmt.Errorf("lookup value type %q: %w", vTypeName, err) + } + + id, err = spec.TypeID(target) + if err != nil { + return nil, 0, nil, err + } + + return target.(*btf.Struct), id, module, nil +} + +// structOpsPopulateValue writes a `prog FD` which references to `p` into the +// struct_ops value buffer `kernVData` at byte offset `dstOff` corresponding to +// the member `km`. 
+func structOpsPopulateValue(km btf.Member, kernVData []byte, p *Program) error { + kmPtr, ok := btf.As[*btf.Pointer](km.Type) + if !ok { + return fmt.Errorf("member %s is not a func pointer", km.Name) + } + + if _, isFuncProto := btf.As[*btf.FuncProto](kmPtr.Target); !isFuncProto { + return fmt.Errorf("member %s is not a func pointer", km.Name) + } + + dstOff := int(km.Offset.Bytes()) + if dstOff < 0 || dstOff+8 > len(kernVData) { + return fmt.Errorf("member %q: value buffer too small for func ptr", km.Name) + } + + internal.NativeEndian.PutUint64(kernVData[dstOff:dstOff+8], uint64(p.FD())) + return nil +} + +// structOpsCopyMember copies a single member from the user struct (m) +// into the kernel value struct (km) for struct_ops. +func structOpsCopyMember(m, km btf.Member, data []byte, kernVData []byte) error { + mSize, err := btf.Sizeof(m.Type) + if err != nil { + return fmt.Errorf("sizeof(user.%s): %w", m.Name, err) + } + kSize, err := btf.Sizeof(km.Type) + if err != nil { + return fmt.Errorf("sizeof(kernel.%s): %w", km.Name, err) + } + if mSize != kSize { + return fmt.Errorf("size mismatch for %s: user=%d kernel=%d", m.Name, mSize, kSize) + } + if km.BitfieldSize > 0 || m.BitfieldSize > 0 { + return fmt.Errorf("bitfield %s not supported", m.Name) + } + + srcOff := int(m.Offset.Bytes()) + dstOff := int(km.Offset.Bytes()) + + if srcOff < 0 || srcOff+mSize > len(data) { + return fmt.Errorf("member %q: userdata is too small", m.Name) + } + + if dstOff < 0 || dstOff+mSize > len(kernVData) { + return fmt.Errorf("member %q: value type is too small", m.Name) + } + + // skip mods(const, restrict, volatile and typetag) + // and typedef to check type compatibility + mType := btf.UnderlyingType(m.Type) + kernMType := btf.UnderlyingType(km.Type) + if reflect.TypeOf(mType) != reflect.TypeOf(kernMType) { + return fmt.Errorf("unmatched member type %s != %s (kernel)", m.Name, km.Name) + } + + switch mType.(type) { + case *btf.Struct, *btf.Union: + if !structOpsIsMemZeroed(data[srcOff : srcOff+mSize]) { + return fmt.Errorf("non-zero nested struct %s: %w", m.Name, ErrNotSupported) + } + // the bytes has zeroed value, we simply skip the copy. + return nil + } + + copy(kernVData[dstOff:dstOff+mSize], data[srcOff:srcOff+mSize]) + return nil +} + +// structOpsIsMemZeroed() checks whether all bytes in data are zero. +func structOpsIsMemZeroed(data []byte) bool { + for _, b := range data { + if b != 0 { + return false + } + } + return true +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/syscalls.go b/src/nvcgo/vendor/github.com/cilium/ebpf/syscalls.go index ccbbe096e..f0f42b77d 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/syscalls.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/syscalls.go @@ -4,41 +4,87 @@ import ( "bytes" "errors" "fmt" + "math" "os" + "runtime" + "strings" "github.com/cilium/ebpf/asm" "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/platform" "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" "github.com/cilium/ebpf/internal/unix" ) -// ErrNotExist is returned when loading a non-existing map or program. 
+var ( + // pre-allocating these here since they may + // get called in hot code paths and cause + // unnecessary memory allocations + sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT) + sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST) + sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP) +) + +// sanitizeName replaces all invalid characters in name with replacement. +// Passing a negative value for replacement will delete characters instead +// of replacing them. // -// Deprecated: use os.ErrNotExist instead. -var ErrNotExist = os.ErrNotExist - -// invalidBPFObjNameChar returns true if char may not appear in -// a BPF object name. -func invalidBPFObjNameChar(char rune) bool { - dotAllowed := objNameAllowsDot() == nil - - switch { - case char >= 'A' && char <= 'Z': - return false - case char >= 'a' && char <= 'z': - return false - case char >= '0' && char <= '9': - return false - case dotAllowed && char == '.': - return false - case char == '_': - return false - default: - return true +// The set of allowed characters may change over time. +func sanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + switch { + case char >= 'A' && char <= 'Z': + return char + case char >= 'a' && char <= 'z': + return char + case char >= '0' && char <= '9': + return char + case char == '.': + return char + case char == '_': + return char + default: + return replacement + } + }, name) +} + +func maybeFillObjName(name string) sys.ObjName { + if errors.Is(haveObjName(), ErrNotSupported) { + return sys.ObjName{} + } + + name = sanitizeName(name, -1) + if errors.Is(objNameAllowsDot(), ErrNotSupported) { + name = strings.ReplaceAll(name, ".", "") + } + + return sys.NewObjName(name) +} + +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err } + bytecode := buf.Bytes() + + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.SlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) } -var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { +var haveNestedMaps = internal.NewFeatureTest("nested maps", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + _, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(ArrayOfMaps), KeySize: 4, @@ -54,9 +100,9 @@ var haveNestedMaps = internal.FeatureTest("nested maps", "4.12", func() error { return nil } return err -}) +}, "4.12", "windows:0.21.0") -var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps", "5.2", func() error { +var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", func() error { // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. 
m, err := sys.MapCreate(&sys.MapCreateAttr{ @@ -64,62 +110,64 @@ var haveMapMutabilityModifiers = internal.FeatureTest("read- and write-only maps KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_RDONLY_PROG, + MapFlags: sys.BPF_F_RDONLY_PROG, }) if err != nil { return internal.ErrNotSupported } _ = m.Close() return nil -}) +}, "5.2") -var haveMmapableMaps = internal.FeatureTest("mmapable maps", "5.5", func() error { +var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", func() error { // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. m, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_MMAPABLE, + MapFlags: sys.BPF_F_MMAPABLE, }) if err != nil { return internal.ErrNotSupported } _ = m.Close() return nil -}) +}, "5.5") -var haveInnerMaps = internal.FeatureTest("inner maps", "5.10", func() error { +var haveInnerMaps = internal.NewFeatureTest("inner maps", func() error { // This checks BPF_F_INNER_MAP, which appeared in 5.10. m, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(Array), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_INNER_MAP, + MapFlags: sys.BPF_F_INNER_MAP, }) + if err != nil { return internal.ErrNotSupported } _ = m.Close() return nil -}) +}, "5.10") -var haveNoPreallocMaps = internal.FeatureTest("prealloc maps", "4.6", func() error { +var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", func() error { // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. m, err := sys.MapCreate(&sys.MapCreateAttr{ MapType: sys.MapType(Hash), KeySize: 4, ValueSize: 4, MaxEntries: 1, - MapFlags: unix.BPF_F_NO_PREALLOC, + MapFlags: sys.BPF_F_NO_PREALLOC, }) + if err != nil { return internal.ErrNotSupported } _ = m.Close() return nil -}) +}, "4.6") func wrapMapError(err error) error { if err == nil { @@ -127,15 +175,15 @@ func wrapMapError(err error) error { } if errors.Is(err, unix.ENOENT) { - return sys.Error(ErrKeyNotExist, unix.ENOENT) + return sysErrKeyNotExist } if errors.Is(err, unix.EEXIST) { - return sys.Error(ErrKeyExist, unix.EEXIST) + return sysErrKeyExist } - if errors.Is(err, unix.ENOTSUPP) { - return sys.Error(ErrNotSupported, unix.ENOTSUPP) + if errors.Is(err, sys.ENOTSUPP) { + return sysErrNotSupported } if errors.Is(err, unix.E2BIG) { @@ -145,7 +193,12 @@ func wrapMapError(err error) error { return err } -var haveObjName = internal.FeatureTest("object names", "4.15", func() error { +var haveObjName = internal.NewFeatureTest("object names", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. + return nil + } + attr := sys.MapCreateAttr{ MapType: sys.MapType(Array), KeySize: 4, @@ -161,9 +214,14 @@ var haveObjName = internal.FeatureTest("object names", "4.15", func() error { _ = fd.Close() return nil -}) +}, "4.15", "windows:0.21.0") + +var objNameAllowsDot = internal.NewFeatureTest("dot in object names", func() error { + if platform.IsWindows { + // We only support efW versions which have this feature, no need to probe. 
+ return nil + } -var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() error { if err := haveObjName(); err != nil { return err } @@ -183,9 +241,9 @@ var objNameAllowsDot = internal.FeatureTest("dot in object names", "5.2", func() _ = fd.Close() return nil -}) +}, "5.2", "windows:0.21.0") -var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { +var haveBatchAPI = internal.NewFeatureTest("map batch api", func() error { var maxEntries uint32 = 2 attr := sys.MapCreateAttr{ MapType: sys.MapType(Hash), @@ -202,8 +260,8 @@ var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { keys := []uint32{1, 2} values := []uint32{3, 4} - kp, _ := marshalPtr(keys, 8) - vp, _ := marshalPtr(values, 8) + kp, _ := marshalMapSyscallInput(keys, 8) + vp, _ := marshalMapSyscallInput(values, 8) err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ MapFd: fd.Uint(), @@ -215,9 +273,9 @@ var haveBatchAPI = internal.FeatureTest("map batch api", "5.6", func() error { return internal.ErrNotSupported } return nil -}) +}, "5.6") -var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", func() error { +var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", func() error { insns := asm.Instructions{ asm.Mov.Reg(asm.R1, asm.R10), asm.Add.Imm(asm.R1, -8), @@ -226,21 +284,88 @@ var haveProbeReadKernel = internal.FeatureTest("bpf_probe_read_kernel", "5.5", f asm.FnProbeReadKernel.Call(), asm.Return(), } + + fd, err := progLoad(insns, Kprobe, "GPL") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}, "5.5") + +var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}, "4.16") + +var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", func() error { + prefix := linux.PlatformPrefix() + if prefix == "" { + return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Symbol: prefix + "sys_bpf", + Pid: -1, + } + + var err error + args.Group, err = tracefs.RandomGroup("ebpf_probe") + if err != nil { + return err + } + + evt, err := tracefs.NewEvent(args) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + return evt.Close() +}, "4.17") + +var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", func() error { + insns := asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + } + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) if err := insns.Marshal(buf, internal.NativeEndian); err != nil { return err } bytecode := buf.Bytes() - fd, err := sys.ProgLoad(&sys.ProgLoadAttr{ - ProgType: sys.ProgType(Kprobe), - License: sys.NewStringPointer("GPL"), - Insns: sys.NewSlicePointer(bytecode), - InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + _, err := sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(SocketFilter), + License: sys.NewStringPointer("MIT"), + Insns: sys.SlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + FuncInfoCnt: 1, + ProgBtfFd: math.MaxUint32, }) - if err != nil { - return internal.ErrNotSupported + + if errors.Is(err, unix.EBADF) { + return 
nil } - _ = fd.Close() - return nil -}) + + if errors.Is(err, unix.E2BIG) { + return ErrNotSupported + } + + return err +}, "5.0") diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/types.go b/src/nvcgo/vendor/github.com/cilium/ebpf/types.go index 46f006422..52ff75b5c 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/types.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/types.go @@ -1,23 +1,19 @@ package ebpf import ( - "github.com/cilium/ebpf/internal/unix" + "github.com/cilium/ebpf/internal/platform" + "github.com/cilium/ebpf/internal/sys" ) -//go:generate stringer -output types_string.go -type=MapType,ProgramType,PinType +//go:generate go tool stringer -output types_string.go -type=MapType,ProgramType,PinType // MapType indicates the type map structure // that will be initialized in the kernel. type MapType uint32 -// Max returns the latest supported MapType. -func (MapType) Max() MapType { - return maxMapType - 1 -} - // All the various map types that can be created const ( - UnspecifiedMap MapType = iota + UnspecifiedMap MapType = MapType(platform.LinuxTag | iota) // Hash is a hash map Hash // Array is an array map @@ -48,7 +44,7 @@ const ( // if an skb is from a socket belonging to a specific cgroup CGroupArray // LRUHash - This allows you to create a small hash structure that will purge the - // least recently used items rather than thow an error when you run out of memory + // least recently used items rather than throw an error when you run out of memory LRUHash // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs, // it has more to do with including the CPU id with the LRU calculation so that if a @@ -99,149 +95,256 @@ const ( InodeStorage // TaskStorage - Specialized local storage map for task_struct. TaskStorage - // maxMapType - Bound enum of MapTypes, has to be last in enum. - maxMapType + // BloomFilter - Space-efficient data structure to quickly test whether an element exists in a set. + BloomFilter + // UserRingbuf - The reverse of RingBuf, used to send messages from user space to BPF programs. + UserRingbuf + // CgroupStorage - Store data keyed on a cgroup. If the cgroup disappears, the key is automatically removed. + CgroupStorage + // Arena - Sparse shared memory region between a BPF program and user space. + Arena +) + +// Map types (Windows). +const ( + WindowsHash MapType = MapType(platform.WindowsTag | iota + 1) + WindowsArray + WindowsProgramArray + WindowsPerCPUHash + WindowsPerCPUArray + WindowsHashOfMaps + WindowsArrayOfMaps + WindowsLRUHash + WindowsLPMTrie + WindowsQueue + WindowsLRUCPUHash + WindowsStack + WindowsRingBuf ) -// Deprecated: StructOpts was a typo, use StructOpsMap instead. +// MapTypeForPlatform returns a platform specific map type. // -// Declared as a variable to prevent stringer from picking it up -// as an enum value. -var StructOpts MapType = StructOpsMap +// Use this if the library doesn't provide a constant yet. +func MapTypeForPlatform(plat string, typ uint32) (MapType, error) { + return platform.EncodeConstant[MapType](plat, typ) +} // hasPerCPUValue returns true if the Map stores a value per CPU. 
func (mt MapType) hasPerCPUValue() bool { - return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage + switch mt { + case PerCPUHash, PerCPUArray, LRUCPUHash, PerCPUCGroupStorage: + return true + case WindowsPerCPUHash, WindowsPerCPUArray, WindowsLRUCPUHash: + return true + default: + return false + } +} + +// canStoreMapOrProgram returns true if the Map stores references to another Map +// or Program. +func (mt MapType) canStoreMapOrProgram() bool { + return mt.canStoreMap() || mt.canStoreProgram() || mt == StructOpsMap } // canStoreMap returns true if the map type accepts a map fd // for update and returns a map id for lookup. func (mt MapType) canStoreMap() bool { - return mt == ArrayOfMaps || mt == HashOfMaps + return mt == ArrayOfMaps || mt == HashOfMaps || mt == WindowsArrayOfMaps || mt == WindowsHashOfMaps } // canStoreProgram returns true if the map type accepts a program fd // for update and returns a program id for lookup. func (mt MapType) canStoreProgram() bool { - return mt == ProgramArray + return mt == ProgramArray || mt == WindowsProgramArray } -// hasBTF returns true if the map type supports BTF key/value metadata. -func (mt MapType) hasBTF() bool { +// canHaveValueSize returns true if the map type supports setting a value size. +func (mt MapType) canHaveValueSize() bool { switch mt { - case PerfEventArray, CGroupArray, StackTrace, ArrayOfMaps, HashOfMaps, DevMap, - DevMapHash, CPUMap, XSKMap, SockMap, SockHash, Queue, Stack, RingBuf: + case RingBuf, Arena: return false - default: + + // Special-case perf events since they require a value size of either 0 or 4 + // for historical reasons. Let the library fix this up later. + case PerfEventArray: + return false + } + + return true +} + +// mustHaveNoPrealloc returns true if the map type does not support +// preallocation and needs the BPF_F_NO_PREALLOC flag set to be created +// successfully. +func (mt MapType) mustHaveNoPrealloc() bool { + switch mt { + case CgroupStorage, InodeStorage, TaskStorage, SkStorage: + return true + case LPMTrie: return true } + + return false } // ProgramType of the eBPF program type ProgramType uint32 -// Max return the latest supported ProgramType. -func (ProgramType) Max() ProgramType { - return maxProgramType - 1 -} +// eBPF program types (Linux). 
+const ( + UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) + SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) + Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE) + SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS) + SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT) + TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT) + XDP = ProgramType(sys.BPF_PROG_TYPE_XDP) + PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT) + CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB) + CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK) + LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN) + LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT) + LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT) + SockOps = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS) + SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB) + CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE) + SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG) + RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT) + CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR) + LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL) + LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2) + SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT) + FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR) + CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL) + RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) + CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT) + Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING) + StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS) + Extension = ProgramType(sys.BPF_PROG_TYPE_EXT) + LSM = ProgramType(sys.BPF_PROG_TYPE_LSM) + SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP) + Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL) + Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER) +) -// eBPF program types +// eBPF program types (Windows). +// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_structs.h#L170 const ( - UnspecifiedProgram ProgramType = iota - SocketFilter - Kprobe - SchedCLS - SchedACT - TracePoint - XDP - PerfEvent - CGroupSKB - CGroupSock - LWTIn - LWTOut - LWTXmit - SockOps - SkSKB - CGroupDevice - SkMsg - RawTracepoint - CGroupSockAddr - LWTSeg6Local - LircMode2 - SkReuseport - FlowDissector - CGroupSysctl - RawTracepointWritable - CGroupSockopt - Tracing - StructOps - Extension - LSM - SkLookup - Syscall - maxProgramType + WindowsXDP ProgramType = ProgramType(platform.WindowsTag) | (iota + 1) + WindowsBind + WindowsCGroupSockAddr + WindowsSockOps + WindowsXDPTest ProgramType = ProgramType(platform.WindowsTag) | 998 + WindowsSample ProgramType = ProgramType(platform.WindowsTag) | 999 ) +// ProgramTypeForPlatform returns a platform specific program type. +// +// Use this if the library doesn't provide a constant yet. +func ProgramTypeForPlatform(plat string, value uint32) (ProgramType, error) { + return platform.EncodeConstant[ProgramType](plat, value) +} + // AttachType of the eBPF program, needed to differentiate allowed context accesses in // some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. // Will cause invalid argument (EINVAL) at program load time if set incorrectly. type AttachType uint32 -//go:generate stringer -type AttachType -trimprefix Attach +//go:generate go tool stringer -type AttachType -trimprefix Attach // AttachNone is an alias for AttachCGroupInetIngress for readability reasons. const AttachNone AttachType = 0 +// Attach types (Linux). 
+const ( + AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) + AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) + AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE) + AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS) + AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER) + AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT) + AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE) + AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT) + AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND) + AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND) + AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT) + AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT) + AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND) + AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND) + AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG) + AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG) + AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2) + AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR) + AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL) + AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG) + AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG) + AttachCGroupGetsockopt = AttachType(sys.BPF_CGROUP_GETSOCKOPT) + AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT) + AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP) + AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY) + AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT) + AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN) + AttachLSMMac = AttachType(sys.BPF_LSM_MAC) + AttachTraceIter = AttachType(sys.BPF_TRACE_ITER) + AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME) + AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME) + AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME) + AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME) + AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP) + AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE) + AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP) + AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP) + AttachXDP = AttachType(sys.BPF_XDP) + AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT) + AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT) + AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) + AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) + AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachTraceKprobeSession = AttachType(sys.BPF_TRACE_KPROBE_SESSION) + AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) + AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) + AttachNetfilter = AttachType(sys.BPF_NETFILTER) + AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS) + AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS) + AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI) + AttachCgroupUnixConnect = AttachType(sys.BPF_CGROUP_UNIX_CONNECT) + AttachCgroupUnixSendmsg = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG) + AttachCgroupUnixRecvmsg = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG) + AttachCgroupUnixGetpeername = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME) + AttachCgroupUnixGetsockname = AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME) + AttachNetkitPrimary = AttachType(sys.BPF_NETKIT_PRIMARY) + 
AttachNetkitPeer = AttachType(sys.BPF_NETKIT_PEER) +) + +// Attach types (Windows). +// +// See https://github.com/microsoft/ebpf-for-windows/blob/main/include/ebpf_structs.h#L260 const ( - AttachCGroupInetIngress AttachType = iota - AttachCGroupInetEgress - AttachCGroupInetSockCreate - AttachCGroupSockOps - AttachSkSKBStreamParser - AttachSkSKBStreamVerdict - AttachCGroupDevice - AttachSkMsgVerdict - AttachCGroupInet4Bind - AttachCGroupInet6Bind - AttachCGroupInet4Connect - AttachCGroupInet6Connect - AttachCGroupInet4PostBind - AttachCGroupInet6PostBind - AttachCGroupUDP4Sendmsg - AttachCGroupUDP6Sendmsg - AttachLircMode2 - AttachFlowDissector - AttachCGroupSysctl - AttachCGroupUDP4Recvmsg - AttachCGroupUDP6Recvmsg - AttachCGroupGetsockopt - AttachCGroupSetsockopt - AttachTraceRawTp - AttachTraceFEntry - AttachTraceFExit - AttachModifyReturn - AttachLSMMac - AttachTraceIter - AttachCgroupInet4GetPeername - AttachCgroupInet6GetPeername - AttachCgroupInet4GetSockname - AttachCgroupInet6GetSockname - AttachXDPDevMap - AttachCgroupInetSockRelease - AttachXDPCPUMap - AttachSkLookup - AttachXDP - AttachSkSKBVerdict - AttachSkReuseportSelect - AttachSkReuseportSelectOrMigrate - AttachPerfEvent + AttachWindowsXDP = AttachType(platform.WindowsTag | iota + 1) + AttachWindowsBind + AttachWindowsCGroupInet4Connect + AttachWindowsCGroupInet6Connect + AttachWindowsCgroupInet4RecvAccept + AttachWindowsCgroupInet6RecvAccept + AttachWindowsCGroupSockOps + AttachWindowsSample + AttachWindowsXDPTest ) +// AttachTypeForPlatform returns a platform specific attach type. +// +// Use this if the library doesn't provide a constant yet. +func AttachTypeForPlatform(plat string, value uint32) (AttachType, error) { + return platform.EncodeConstant[AttachType](plat, value) +} + // AttachFlags of the eBPF program used in BPF_PROG_ATTACH command type AttachFlags uint32 // PinType determines whether a map is pinned into a BPFFS. -type PinType int +type PinType uint32 // Valid pin types. // @@ -271,10 +374,10 @@ func (lpo *LoadPinOptions) Marshal() uint32 { flags := lpo.Flags if lpo.ReadOnly { - flags |= unix.BPF_F_RDONLY + flags |= sys.BPF_F_RDONLY } if lpo.WriteOnly { - flags |= unix.BPF_F_WRONLY + flags |= sys.BPF_F_WRONLY } return flags } @@ -288,3 +391,20 @@ type BatchOptions struct { ElemFlags uint64 Flags uint64 } + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +// These constants can be used for the ProgramOptions.LogLevel field. +type LogLevel = sys.LogLevel + +const ( + // Print verifier state at branch points. + LogLevelBranch = sys.BPF_LOG_LEVEL1 + + // Print verifier state for every instruction. + // Available since Linux v5.2. + LogLevelInstruction = sys.BPF_LOG_LEVEL2 + + // Print verifier errors and stats at the end of the verification process. + // Available since Linux v5.2. 
+ LogLevelStats = sys.BPF_LOG_STATS +) diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/types_string.go b/src/nvcgo/vendor/github.com/cilium/ebpf/types_string.go index e80b948b0..94bc2e26c 100644 --- a/src/nvcgo/vendor/github.com/cilium/ebpf/types_string.go +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/types_string.go @@ -38,18 +38,45 @@ func _() { _ = x[RingBuf-27] _ = x[InodeStorage-28] _ = x[TaskStorage-29] - _ = x[maxMapType-30] + _ = x[BloomFilter-30] + _ = x[UserRingbuf-31] + _ = x[CgroupStorage-32] + _ = x[Arena-33] + _ = x[WindowsHash-268435457] + _ = x[WindowsArray-268435458] + _ = x[WindowsProgramArray-268435459] + _ = x[WindowsPerCPUHash-268435460] + _ = x[WindowsPerCPUArray-268435461] + _ = x[WindowsHashOfMaps-268435462] + _ = x[WindowsArrayOfMaps-268435463] + _ = x[WindowsLRUHash-268435464] + _ = x[WindowsLPMTrie-268435465] + _ = x[WindowsQueue-268435466] + _ = x[WindowsLRUCPUHash-268435467] + _ = x[WindowsStack-268435468] + _ = x[WindowsRingBuf-268435469] } -const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStoragemaxMapType" +const ( + _MapType_name_0 = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorageBloomFilterUserRingbufCgroupStorageArena" + _MapType_name_1 = "WindowsHashWindowsArrayWindowsProgramArrayWindowsPerCPUHashWindowsPerCPUArrayWindowsHashOfMapsWindowsArrayOfMapsWindowsLRUHashWindowsLPMTrieWindowsQueueWindowsLRUCPUHashWindowsStackWindowsRingBuf" +) -var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 300} +var ( + _MapType_index_0 = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 301, 312, 325, 330} + _MapType_index_1 = [...]uint8{0, 11, 23, 42, 59, 77, 94, 112, 126, 140, 152, 169, 181, 195} +) func (i MapType) String() string { - if i >= MapType(len(_MapType_index)-1) { + switch { + case i <= 33: + return _MapType_name_0[_MapType_index_0[i]:_MapType_index_0[i+1]] + case 268435457 <= i && i <= 268435469: + i -= 268435457 + return _MapType_name_1[_MapType_index_1[i]:_MapType_index_1[i+1]] + default: return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] } func _() { // An "invalid array index" compiler error signifies that the constant values have changed. 
@@ -87,18 +114,40 @@ func _() { _ = x[LSM-29] _ = x[SkLookup-30] _ = x[Syscall-31] - _ = x[maxProgramType-32] + _ = x[Netfilter-32] + _ = x[WindowsXDP-268435457] + _ = x[WindowsBind-268435458] + _ = x[WindowsCGroupSockAddr-268435459] + _ = x[WindowsSockOps-268435460] + _ = x[WindowsXDPTest-268436454] + _ = x[WindowsSample-268436455] } -const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallmaxProgramType" +const ( + _ProgramType_name_0 = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" + _ProgramType_name_1 = "WindowsXDPWindowsBindWindowsCGroupSockAddrWindowsSockOps" + _ProgramType_name_2 = "WindowsXDPTestWindowsSample" +) -var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 315} +var ( + _ProgramType_index_0 = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} + _ProgramType_index_1 = [...]uint8{0, 10, 21, 42, 56} + _ProgramType_index_2 = [...]uint8{0, 14, 27} +) func (i ProgramType) String() string { - if i >= ProgramType(len(_ProgramType_index)-1) { + switch { + case i <= 32: + return _ProgramType_name_0[_ProgramType_index_0[i]:_ProgramType_index_0[i+1]] + case 268435457 <= i && i <= 268435460: + i -= 268435457 + return _ProgramType_name_1[_ProgramType_index_1[i]:_ProgramType_index_1[i+1]] + case 268436454 <= i && i <= 268436455: + i -= 268436454 + return _ProgramType_name_2[_ProgramType_index_2[i]:_ProgramType_index_2[i+1]] + default: return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] } func _() { // An "invalid array index" compiler error signifies that the constant values have changed. @@ -113,8 +162,9 @@ const _PinType_name = "PinNonePinByName" var _PinType_index = [...]uint8{0, 7, 16} func (i PinType) String() string { - if i < 0 || i >= PinType(len(_PinType_index)-1) { + idx := int(i) - 0 + if i < 0 || idx >= len(_PinType_index)-1 { return "PinType(" + strconv.FormatInt(int64(i), 10) + ")" } - return _PinType_name[_PinType_index[i]:_PinType_index[i+1]] + return _PinType_name[_PinType_index[idx]:_PinType_index[idx+1]] } diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/types_windows.go b/src/nvcgo/vendor/github.com/cilium/ebpf/types_windows.go new file mode 100644 index 000000000..0b7e836b0 --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/types_windows.go @@ -0,0 +1,57 @@ +package ebpf + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows" + + "github.com/cilium/ebpf/internal/efw" + "github.com/cilium/ebpf/internal/platform" +) + +// WindowsProgramTypeForGUID resolves a GUID to a ProgramType. +// +// The GUID must be in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +// +// Returns an error wrapping [os.ErrNotExist] if the GUID is not recignized. 
+func WindowsProgramTypeForGUID(guid string) (ProgramType, error) { + progTypeGUID, err := windows.GUIDFromString(guid) + if err != nil { + return 0, fmt.Errorf("parse GUID: %w", err) + } + + rawProgramType, err := efw.EbpfGetBpfProgramType(progTypeGUID) + if err != nil { + return 0, fmt.Errorf("get program type: %w", err) + } + + if rawProgramType == 0 { + return 0, fmt.Errorf("program type not found for GUID %v: %w", guid, os.ErrNotExist) + } + + return ProgramTypeForPlatform(platform.Windows, rawProgramType) +} + +// WindowsAttachTypeForGUID resolves a GUID to an AttachType. +// +// The GUID must be in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". +// +// Returns an error wrapping [os.ErrNotExist] if the GUID is not recignized. +func WindowsAttachTypeForGUID(guid string) (AttachType, error) { + attachTypeGUID, err := windows.GUIDFromString(guid) + if err != nil { + return 0, fmt.Errorf("parse GUID: %w", err) + } + + rawAttachType, err := efw.EbpfGetBpfAttachType(attachTypeGUID) + if err != nil { + return 0, fmt.Errorf("get attach type: %w", err) + } + + if rawAttachType == 0 { + return 0, fmt.Errorf("attach type not found for GUID %v: %w", attachTypeGUID, os.ErrNotExist) + } + + return AttachTypeForPlatform(platform.Windows, rawAttachType) +} diff --git a/src/nvcgo/vendor/github.com/cilium/ebpf/variable.go b/src/nvcgo/vendor/github.com/cilium/ebpf/variable.go new file mode 100644 index 000000000..c6fd55cba --- /dev/null +++ b/src/nvcgo/vendor/github.com/cilium/ebpf/variable.go @@ -0,0 +1,270 @@ +package ebpf + +import ( + "fmt" + "io" + "reflect" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sysenc" +) + +// VariableSpec is a convenience wrapper for modifying global variables of a +// CollectionSpec before loading it into the kernel. +// +// All operations on a VariableSpec's underlying MapSpec are performed in the +// host's native endianness. +type VariableSpec struct { + name string + offset uint64 + size uint64 + + m *MapSpec + t *btf.Var +} + +// Set sets the value of the VariableSpec to the provided input using the host's +// native endianness. +func (s *VariableSpec) Set(in any) error { + buf, err := sysenc.Marshal(in, int(s.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", s.name, err) + } + + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + buf.CopyTo(cpy[s.offset : s.offset+s.size]) + + s.m.Contents[0] = MapKV{Key: uint32(0), Value: cpy} + + return nil +} + +// Get writes the value of the VariableSpec to the provided output using the +// host's native endianness. +func (s *VariableSpec) Get(out any) error { + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + if err := sysenc.Unmarshal(out, b[s.offset:s.offset+s.size]); err != nil { + return fmt.Errorf("unmarshaling value: %w", err) + } + + return nil +} + +// Size returns the size of the variable in bytes. 
+func (s *VariableSpec) Size() uint64 { + return s.size +} + +// MapName returns the name of the underlying MapSpec. +func (s *VariableSpec) MapName() string { + return s.m.Name +} + +// Offset returns the offset of the variable in the underlying MapSpec. +func (s *VariableSpec) Offset() uint64 { + return s.offset +} + +// Constant returns true if the VariableSpec represents a variable that is +// read-only from the perspective of the BPF program. +func (s *VariableSpec) Constant() bool { + return s.m.readOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. +func (s *VariableSpec) Type() *btf.Var { + return s.t +} + +func (s *VariableSpec) String() string { + return fmt.Sprintf("%s (type=%v, map=%s, offset=%d, size=%d)", s.name, s.t, s.m.Name, s.offset, s.size) +} + +// copy returns a new VariableSpec with the same values as the original, +// but with a different underlying MapSpec. This is useful when copying a +// CollectionSpec. Returns nil if a MapSpec with the same name is not found. +func (s *VariableSpec) copy(cpy *CollectionSpec) *VariableSpec { + out := &VariableSpec{ + name: s.name, + offset: s.offset, + size: s.size, + } + if s.t != nil { + out.t = btf.Copy(s.t).(*btf.Var) + } + + // Attempt to find a MapSpec with the same name in the copied CollectionSpec. + for _, m := range cpy.Maps { + if m.Name == s.m.Name { + out.m = m + return out + } + } + + return nil +} + +// Variable is a convenience wrapper for modifying global variables of a +// Collection after loading it into the kernel. Operations on a Variable are +// performed using direct memory access, bypassing the BPF map syscall API. +// +// On kernels older than 5.5, most interactions with Variable return +// [ErrNotSupported]. +type Variable struct { + name string + offset uint64 + size uint64 + t *btf.Var + + mm *Memory +} + +func newVariable(name string, offset, size uint64, t *btf.Var, mm *Memory) (*Variable, error) { + if mm != nil { + if int(offset+size) > mm.Size() { + return nil, fmt.Errorf("offset %d(+%d) is out of bounds", offset, size) + } + } + + return &Variable{ + name: name, + offset: offset, + size: size, + t: t, + mm: mm, + }, nil +} + +// Size returns the size of the variable. +func (v *Variable) Size() uint64 { + return v.size +} + +// ReadOnly returns true if the Variable represents a variable that is read-only +// after loading the Collection into the kernel. +// +// On systems without BPF_F_MMAPABLE support, ReadOnly always returns true. +func (v *Variable) ReadOnly() bool { + if v.mm == nil { + return true + } + return v.mm.ReadOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. +func (v *Variable) Type() *btf.Var { + return v.t +} + +func (v *Variable) String() string { + return fmt.Sprintf("%s (type=%v)", v.name, v.t) +} + +// Set the value of the Variable to the provided input. The input must marshal +// to the same length as the size of the Variable. 
+func (v *Variable) Set(in any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if v.ReadOnly() { + return fmt.Errorf("variable %s: %w", v.name, ErrReadOnly) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + buf, err := sysenc.Marshal(in, int(v.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", v.name, err) + } + + if _, err := v.mm.WriteAt(buf.Bytes(), int64(v.offset)); err != nil { + return fmt.Errorf("writing value to %s: %w", v.name, err) + } + + return nil +} + +// Get writes the value of the Variable to the provided output. The output must +// be a pointer to a value whose size matches the Variable. +func (v *Variable) Get(out any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + if err := sysenc.Unmarshal(out, v.mm.b[v.offset:v.offset+v.size]); err != nil { + return fmt.Errorf("unmarshaling value %s: %w", v.name, err) + } + + return nil +} + +func checkVariable[T any](v *Variable) error { + if v.ReadOnly() { + return ErrReadOnly + } + + t := reflect.TypeFor[T]() + size := uint64(t.Size()) + if t.Kind() == reflect.Uintptr && v.size == 8 { + // uintptr is 8 bytes on 64-bit and 4 on 32-bit. In BPF/BTF, pointers are + // always 8 bytes. For the sake of portability, allow accessing 8-byte BPF + // variables as uintptr on 32-bit systems, since the upper 32 bits of the + // pointer should be zero anyway. + return nil + } + if v.size != size { + return fmt.Errorf("can't create %d-byte accessor to %d-byte variable: %w", size, v.size, ErrInvalidType) + } + + return nil +} + +// VariablePointer returns a pointer to a variable of type T backed by memory +// shared with the BPF program. Requires building the Go application with -tags +// ebpf_unsafe_memory_experiment. +// +// T must contain only fixed-size, non-Go-pointer types: bools, floats, +// (u)int[8-64], arrays, and structs containing them. Structs must embed +// [structs.HostLayout]. [ErrInvalidType] is returned if T is not a valid type. +func VariablePointer[T comparable](v *Variable) (*T, error) { + if err := checkVariable[T](v); err != nil { + return nil, fmt.Errorf("variable pointer %s: %w", v.name, err) + } + return memoryPointer[T](v.mm, v.offset) +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/LICENSE b/src/nvcgo/vendor/golang.org/x/sys/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/LICENSE +++ b/src/nvcgo/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
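Aside (illustrative only, not part of the vendored patch): the variable.go file added above introduces a pre-load VariableSpec and a post-load Variable API for BPF global variables. The sketch below shows how that API might be used, assuming a hypothetical object file "prog.o" that declares a global named "max_entries_limit"; both names are placeholders, not anything defined by this patch.

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Parse the ELF object into a CollectionSpec without loading it yet.
	spec, err := ebpf.LoadCollectionSpec("prog.o")
	if err != nil {
		log.Fatal(err)
	}

	// Before load: write the global through its VariableSpec. Per the doc
	// comments above, this updates the underlying data-section MapSpec in
	// the host's native endianness.
	v, ok := spec.Variables["max_entries_limit"] // hypothetical variable name
	if !ok {
		log.Fatal("variable not found in object")
	}
	if err := v.Set(uint32(1024)); err != nil {
		log.Fatal(err)
	}

	// Load the collection into the kernel.
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		log.Fatal(err)
	}
	defer coll.Close()

	// After load: read the global back through a Variable. As documented
	// above, this uses direct memory access and returns ErrNotSupported on
	// kernels older than 5.5.
	var limit uint32
	if err := coll.Variables["max_entries_limit"].Get(&limit); err != nil {
		log.Fatal(err)
	}
	log.Printf("max_entries_limit = %d", limit)
}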
diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/README.md b/src/nvcgo/vendor/golang.org/x/sys/unix/README.md index 7d3c060e1..6e08a76a7 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/README.md +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/affinity_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/affinity_linux.go index 6e5c81acd..3ea470387 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -38,8 +38,15 @@ func SchedSetaffinity(pid int, set *CPUSet) error { // Zero clears the set s, so that it contains no CPUs. func (s *CPUSet) Zero() { + clear(s[:]) +} + +// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity] +// will silently ignore any invalid CPU bits in [CPUSet] so this is an +// efficient way of resetting the CPU affinity of a process. +func (s *CPUSet) Fill() { for i := range s { - s[i] = 0 + s[i] = ^cpuMask(0) } } diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/auxv.go b/src/nvcgo/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 000000000..37a82528f --- /dev/null +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/src/nvcgo/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 000000000..1200487f2 --- /dev/null +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/fdset.go b/src/nvcgo/vendor/golang.org/x/sys/unix/fdset.go index 9e83d18cd..62ed12645 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/fdset.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/fdset.go @@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool { // Zero clears the set fds. func (fds *FdSet) Zero() { - for i := range fds.Bits { - fds.Bits[i] = 0 - } + clear(fds.Bits[:]) } diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ifreq_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ifreq_linux.go index 848840ae4..309f5a2b0 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) { // clear zeroes the ifreq's union field to prevent trailing garbage data from // being sent to the kernel if an ifreq is reused. func (ifr *Ifreq) clear() { - for i := range ifr.raw.Ifru { - ifr.raw.Ifru[i] = 0 - } + clear(ifr.raw.Ifru[:]) } // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ioctl_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ioctl_linux.go index dbe680eab..7ca4fa12a 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) { return &value, err } +// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC +// association for the network device specified by ifname. +func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd) + return &value, err +} + +// IoctlGetHwTstamp retrieves the hardware timestamping configuration +// for the network device specified by ifname. +func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) { + ifr, err := NewIfreq(ifname) + if err != nil { + return nil, err + } + + value := HwTstampConfig{} + ifrd := ifr.withData(unsafe.Pointer(&value)) + + err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd) + return &value, err +} + +// IoctlSetHwTstamp updates the hardware timestamping configuration for +// the network device specified by ifname. +func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error { + ifr, err := NewIfreq(ifname) + if err != nil { + return err + } + ifrd := ifr.withData(unsafe.Pointer(cfg)) + return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd) +} + +// FdToClockID derives the clock ID from the file descriptor number +// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is +// suitable for system calls like ClockGettime. +func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) } + +// IoctlPtpClockGetcaps returns the description of a given PTP device. +func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) { + var value PtpClockCaps + err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetPrecise returns a description of the clock +// offset compared to the system clock. 
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) { + var value PtpSysOffsetPrecise + err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpSysOffsetExtended returns an extended description of the +// clock offset compared to the system clock. The samples parameter +// specifies the desired number of measurements. +func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) { + value := PtpSysOffsetExtended{Samples: uint32(samples)} + err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinGetfunc returns the configuration of the specified +// I/O pin on given PTP device. +func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) { + value := PtpPinDesc{Index: uint32(index)} + err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value)) + return &value, err +} + +// IoctlPtpPinSetfunc updates configuration of the specified PTP +// I/O pin. +func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error { + return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd)) +} + +// IoctlPtpPeroutRequest configures the periodic output mode of the +// PTP I/O pins. +func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error { + return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r)) +} + +// IoctlPtpExttsRequest configures the external timestamping mode +// of the PTP I/O pins. +func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error { + return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r)) +} + // IoctlGetWatchdogInfo fetches information about a watchdog device from the // Linux watchdog API. For more information, see: // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html. diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/mkall.sh b/src/nvcgo/vendor/golang.org/x/sys/unix/mkall.sh index e6f31d374..d0ed61191 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/mkall.sh +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/mkall.sh @@ -49,6 +49,7 @@ esac if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) + set -e $cmd docker build --tag generate:$GOOS $GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS exit diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/nvcgo/vendor/golang.org/x/sys/unix/mkerrors.sh index 4ed2e488b..d1c8b2640 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -58,6 +58,7 @@ includes_Darwin=' #define _DARWIN_USE_64_BIT_INODE #define __APPLE_USE_RFC_3542 #include +#include #include #include #include @@ -157,6 +158,16 @@ includes_Linux=' #endif #define _GNU_SOURCE +// See the description in unix/linux/types.go +#if defined(__ARM_EABI__) || \ + (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \ + (defined(__powerpc__) && (!defined(__powerpc64__))) +# ifdef _TIME_BITS +# undef _TIME_BITS +# endif +# define _TIME_BITS 32 +#endif + // is broken on powerpc64, as it fails to include definitions of // these structures. We just include them copied from . 
#if defined(__powerpc__) @@ -255,6 +266,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -337,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' @@ -526,6 +541,7 @@ ccflags="$@" $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || + $2 ~ /^PTP_/ || $2 ~ /^RAW_PAYLOAD_/ || $2 ~ /^[US]F_/ || $2 ~ /^TP_STATUS_/ || @@ -551,6 +567,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -654,7 +671,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -664,7 +681,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/mremap.go b/src/nvcgo/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529..3a5e776f8 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/mremap.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_aix.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef2..6f15ba1ea 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_darwin.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897..7838ca5db 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -402,6 +402,18 @@ func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } +//sys renamexNp(from string, to string, flag uint32) (err error) + +func RenamexNp(from string, to string, flag uint32) (err error) { + return renamexNp(from, to, flag) +} + +//sys renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) + +func RenameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + return renameatxNp(fromfd, from, tofd, to, flag) +} + //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL func Uname(uname *Utsname) error { @@ -542,6 +554,144 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +const minIovec = 8 + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = preadv(fd, iovecs, offset) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwritev(fd, iovecs, offset) + writevRacedetect(iovecs, n) + return n, err +} + +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) + if len(b) > 0 { + v.Base = &b[0] + } else { + v.Base = (*byte)(unsafe.Pointer(&_zero)) + } + vecs = append(vecs, v) + } + return vecs +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) @@ -644,3 +794,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys readv(fd int, iovecs []Iovec) (n int, err error) +//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (n int, err error) +//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_dragonfly.go 
b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 97cb916f2..be8c00207 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_hurd.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8..a6a2d2fc2 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux.go index 5682e2628..9439af961 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "slices" "strconv" "syscall" "time" @@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { + for i := range n { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. @@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) psm[0] = byte(sa.PSM) psm[1] = byte(sa.PSM >> 8) - for i := 0; i < len(sa.Addr); i++ { + for i := range len(sa.Addr) { sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] } cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) @@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i] = rx[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+4] = tx[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil @@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) n := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Addr[i] = n[i] } p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+8] = p[i] } sa.raw.Addr[12] = sa.Addr @@ -800,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { // one. The kernel expects SID to be in network byte order. 
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID) copy(sa.raw[8:14], sa.Remote) - for i := 14; i < 14+IFNAMSIZ; i++ { - sa.raw[i] = 0 - } + clear(sa.raw[14 : 14+IFNAMSIZ]) copy(sa.raw[14:], sa.Dev) return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } @@ -911,7 +910,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { // These are EBCDIC encoded by the kernel, but we still need to pad them // with blanks. Initializing with blanks allows the caller to feed in either // a padded or an unpadded string. - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Nodeid[i] = ' ' sa.raw.User_id[i] = ' ' sa.raw.Name[i] = ' ' @@ -1148,7 +1147,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { var user [8]byte var name [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { user[i] = byte(pp.User_id[i]) name[i] = byte(pp.Name[i]) } @@ -1173,11 +1172,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } name := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { name[i] = pp.Addr[i] } pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { pgn[i] = pp.Addr[i+8] } addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) @@ -1188,11 +1187,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { rx[i] = pp.Addr[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { tx[i] = pp.Addr[i+4] } return sa, nil @@ -1295,6 +1294,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. 
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1818,6 +1859,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) //sys ClockGettime(clockid int32, time *Timespec) (err error) +//sys ClockSettime(clockid int32, time *Timespec) (err error) //sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) //sys Close(fd int) (err error) //sys CloseRange(first uint, last uint, flags uint) (err error) @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) @@ -2154,10 +2215,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2208,10 +2266,7 @@ func writevRacedetect(iovecs []Iovec, n int) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2258,12 +2313,7 @@ func isGroupMember(gid int) bool { return false } - for _, g := range groups { - if g == gid { - return true - } - } - return false + return slices.Contains(groups, gid) } func isCapDacOverrideSet() bool { @@ -2592,3 +2642,4 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } //sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) +//sys Mseal(b []byte, flags uint) (err error) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c75..745e5c7e6 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, 
cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e98451..dd2262a40 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a28894..8cf3670bd 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 88162099a..34a467697 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { return Statvfs1(path, buf, ST_WAIT) } +func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) { + var ( + _p0 unsafe.Pointer + bufsize uintptr + ) + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf)) + } + r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + /* * Exposed directly */ diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_openbsd.go index b25343c71..b86ded549 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -293,6 +293,7 @@ func Uname(uname *Utsname) error { //sys Mkfifoat(dirfd int, path string, mode uint32) (err error) //sys Mknod(path string, mode uint32, dev int) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) +//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys Open(path string, mode int, perm uint32) (fd int, err error) //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_solaris.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af06..18a3d9bda 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -629,7 +629,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Kill(pid int, signum syscall.Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) //sys Link(path string, link string) (err error) -//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten +//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen //sys Lstat(path string, stat *Stat_t) (err error) //sys Madvise(b []byte, advice int) (err error) //sys Mkdir(path string, mode 
uint32) (err error) @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. +func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_unix.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c..4e92e5aa4 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 312ae6ac1..7bf5c04bb 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -768,6 
+768,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) @@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) { // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ func isSpecialPath(path []byte) (v bool) { var special = [4][8]byte{ - [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, - [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, - [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} var i, j int for i = 0; i < len(special); i++ { @@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { //sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT //sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT //sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT + +func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg) + runtime.ExitSyscall() + val = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) { + switch op.(type) { + case *Flock_t: + err = FcntlFlock(fd, cmd, op.(*Flock_t)) + if err != nil { + ret = -1 + } + return + case int: + return FcntlInt(fd, cmd, op.(int)) + case *F_cnvrt: + return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt)))) + case unsafe.Pointer: + return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer))) + default: + return -1, EINVAL + } + return +} + +func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + return sendfile(outfd, infd, offset, count) +} + +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + // TODO: use LE call instead if the call is implemented + originalOffset, err := Seek(infd, 0, SEEK_CUR) + if err != nil { + return -1, err + } + //start reading data from in_fd + if offset != nil { + _, err := Seek(infd, *offset, SEEK_SET) + if err != nil { + return -1, err + } + } + + buf := make([]byte, count) + readBuf := make([]byte, 0) + var n int = 0 + for i := 0; i < count; i += n { + n, err := Read(infd, buf) + if n == 0 { + if err != nil { + return -1, err + } else { // EOF + break + } + } + readBuf = append(readBuf, buf...) + buf = buf[0:0] + } + + n2, err := Write(outfd, readBuf) + if err != nil { + return -1, err + } + + //When sendfile() returns, this variable will be set to the + // offset of the byte following the last byte that was read. 
+ if offset != nil { + *offset = *offset + int64(n) + // If offset is not NULL, then sendfile() does not modify the file + // offset of in_fd + _, err := Seek(infd, originalOffset, SEEK_SET) + if err != nil { + return -1, err + } + } + return n2, nil +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 000000000..07ac8e09d --- /dev/null +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go b/src/nvcgo/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go new file mode 100644 index 000000000..297e97bce --- /dev/null +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -0,0 +1,11 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !linux || !go1.24 + +package unix + +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index e40fa8524..d73c4652e 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index bb02aa6c0..4a55a4005 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1169,6 +1172,11 @@ const ( PT_WRITE_D = 0x5 PT_WRITE_I = 0x4 PT_WRITE_U = 0x6 + RENAME_EXCL = 0x4 + RENAME_NOFOLLOW_ANY = 0x10 + RENAME_RESERVED1 = 0x8 + RENAME_SECLUDE = 0x1 + RENAME_SWAP = 0x2 RLIMIT_AS = 0x5 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1260,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 
SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux.go index 877a62b47..b6db27d93 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -319,11 +319,17 @@ const ( AUDIT_INTEGRITY_POLICY_RULE = 0x70f AUDIT_INTEGRITY_RULE = 0x70d AUDIT_INTEGRITY_STATUS = 0x70a + AUDIT_INTEGRITY_USERSPACE = 0x710 AUDIT_IPC = 0x517 AUDIT_IPC_SET_PERM = 0x51f + AUDIT_IPE_ACCESS = 0x58c + AUDIT_IPE_CONFIG_CHANGE = 0x58d + AUDIT_IPE_POLICY_LOAD = 0x58e AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -457,6 +463,7 @@ const ( B600 = 0x8 B75 = 0x2 B9600 = 0xd + BCACHEFS_SUPER_MAGIC = 0xca451a4e BDEVFS_MAGIC = 0x62646576 BINDERFS_SUPER_MAGIC = 0x6c6f6f70 BINFMTFS_MAGIC = 0x42494e4d @@ -487,13 +494,16 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 @@ -521,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -548,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -837,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2023-03-01)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x30 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -928,12 +940,12 @@ const ( EPOLL_CTL_ADD = 0x1 EPOLL_CTL_DEL = 0x2 EPOLL_CTL_MOD = 0x3 + EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 + ETHTOOL_FAMILY_NAME = "ethtool" + ETHTOOL_FAMILY_VERSION = 0x1 ETHTOOL_FEC_AUTO = 0x2 ETHTOOL_FEC_BASER = 0x10 ETHTOOL_FEC_LLRS = 0x20 @@ -941,9 +953,6 @@ const ( ETHTOOL_FEC_OFF = 0x4 ETHTOOL_FEC_RS = 0x8 ETHTOOL_FLAG_ALL = 0x7 - ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 - ETHTOOL_FLAG_OMIT_REPLY = 0x2 - ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_FLASHDEV = 0x33 ETHTOOL_FLASH_MAX_FILENAME = 0x80 ETHTOOL_FWVERS_LEN = 0x20 @@ -1166,6 +1175,7 @@ const ( EXTA = 0xe EXTB = 0xf F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_ALLOCATE_RANGE = 0x0 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -1198,13 +1208,18 @@ const ( FAN_DENY = 0x2 FAN_ENABLE_AUDIT = 0x40 FAN_EPIDFD = -0x2 + FAN_ERRNO_BITS = 0x8 + FAN_ERRNO_MASK = 0xff + FAN_ERRNO_SHIFT = 0x18 FAN_EVENT_INFO_TYPE_DFID = 0x3 FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 + FAN_EVENT_INFO_TYPE_RANGE = 0x6 FAN_EVENT_METADATA_LEN = 0x18 FAN_EVENT_ON_CHILD = 0x8000000 FAN_FS_ERROR = 
0x8000 @@ -1219,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1235,12 +1253,15 @@ const ( FAN_OPEN_EXEC = 0x1000 FAN_OPEN_EXEC_PERM = 0x40000 FAN_OPEN_PERM = 0x10000 + FAN_PRE_ACCESS = 0x100000 FAN_Q_OVERFLOW = 0x4000 FAN_RENAME = 0x10000000 FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1260,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1325,8 +1347,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1546,6 +1570,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1565,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1616,8 +1640,9 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1676,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1705,6 +1729,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_CRASH_HOTPLUG_SUPPORT = 0x8 KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 @@ -1780,6 +1805,7 @@ const ( KEY_SPEC_USER_KEYRING = -0x4 KEY_SPEC_USER_SESSION_KEYRING = -0x5 LANDLOCK_ACCESS_FS_EXECUTE = 0x1 + LANDLOCK_ACCESS_FS_IOCTL_DEV = 0x8000 LANDLOCK_ACCESS_FS_MAKE_BLOCK = 0x800 LANDLOCK_ACCESS_FS_MAKE_CHAR = 0x40 LANDLOCK_ACCESS_FS_MAKE_DIR = 0x80 @@ -1796,7 +1822,13 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 + LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 + LANDLOCK_SCOPE_SIGNAL = 0x2 LINUX_REBOOT_CMD_CAD_OFF = 0x0 LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef LINUX_REBOOT_CMD_HALT = 0xcdef0123 @@ -1858,9 +1890,23 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_16GB = 0x88000000 + MAP_HUGE_16KB = 0x38000000 + MAP_HUGE_16MB = 0x60000000 + MAP_HUGE_1GB = 0x78000000 + MAP_HUGE_1MB = 0x50000000 + MAP_HUGE_256MB = 0x70000000 + MAP_HUGE_2GB = 0x7c000000 + MAP_HUGE_2MB = 0x54000000 + MAP_HUGE_32MB = 0x64000000 + 
MAP_HUGE_512KB = 0x4c000000 + MAP_HUGE_512MB = 0x74000000 + MAP_HUGE_64KB = 0x40000000 + MAP_HUGE_8MB = 0x5c000000 MAP_HUGE_MASK = 0x3f MAP_HUGE_SHIFT = 0x1a MAP_PRIVATE = 0x2 @@ -1908,6 +1954,8 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 + MNT_NS_INFO_SIZE_VER0 = 0x10 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -1943,6 +1991,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2059,6 +2108,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2139,6 +2189,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2173,7 +2224,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2342,9 +2393,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2417,6 +2470,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2448,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2464,6 +2522,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2472,6 +2531,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2498,6 +2558,25 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 + PR_PPC_DEXCR_CTRL_CLEAR = 0x4 + PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 + PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 + PR_PPC_DEXCR_CTRL_MASK = 0x1f + PR_PPC_DEXCR_CTRL_SET = 0x2 + PR_PPC_DEXCR_CTRL_SET_ONEXEC = 0x8 + PR_PPC_DEXCR_IBRTPD = 0x1 + PR_PPC_DEXCR_NPHIE = 0x3 + PR_PPC_DEXCR_SBHE = 0x0 + PR_PPC_DEXCR_SRAPD = 0x2 + PR_PPC_GET_DEXCR = 0x48 + PR_PPC_SET_DEXCR = 0x49 + PR_RISCV_CTX_SW_FENCEI_OFF = 0x1 + PR_RISCV_CTX_SW_FENCEI_ON = 0x0 + PR_RISCV_SCOPE_PER_PROCESS = 0x0 + PR_RISCV_SCOPE_PER_THREAD = 0x1 + PR_RISCV_SET_ICACHE_FLUSH_CTX = 0x47 PR_RISCV_V_GET_CONTROL = 0x46 PR_RISCV_V_SET_CONTROL = 0x45 PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 @@ -2548,6 +2627,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2558,6 +2638,9 @@ const ( PR_SET_UNALIGN = 0x6 
PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2582,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2589,6 +2676,28 @@ const ( PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 PSTOREFS_MAGIC = 0x6165676c + PTP_CLK_MAGIC = '=' + PTP_ENABLE_FEATURE = 0x1 + PTP_EXTTS_EDGES = 0x6 + PTP_EXTTS_EVENT_VALID = 0x1 + PTP_EXTTS_V1_VALID_FLAGS = 0x7 + PTP_EXTTS_VALID_FLAGS = 0x1f + PTP_EXT_OFFSET = 0x10 + PTP_FALLING_EDGE = 0x4 + PTP_MAX_SAMPLES = 0x19 + PTP_PEROUT_DUTY_CYCLE = 0x2 + PTP_PEROUT_ONE_SHOT = 0x1 + PTP_PEROUT_PHASE = 0x4 + PTP_PEROUT_V1_VALID_FLAGS = 0x0 + PTP_PEROUT_VALID_FLAGS = 0x7 + PTP_PIN_GETFUNC = 0xc0603d06 + PTP_PIN_GETFUNC2 = 0xc0603d0f + PTP_RISING_EDGE = 0x2 + PTP_STRICT_FLAGS = 0x8 + PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09 + PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12 + PTP_SYS_OFFSET_PRECISE = 0xc0403d08 + PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11 PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -2640,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2703,7 +2813,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e + RTA_MAX = 0x1f RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -2780,10 +2890,12 @@ const ( RTM_DELACTION = 0x31 RTM_DELADDR = 0x15 RTM_DELADDRLABEL = 0x49 + RTM_DELANYCAST = 0x3d RTM_DELCHAIN = 0x65 RTM_DELLINK = 0x11 RTM_DELLINKPROP = 0x6d RTM_DELMDB = 0x55 + RTM_DELMULTICAST = 0x39 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 RTM_DELNEXTHOP = 0x69 @@ -2833,11 +2945,13 @@ const ( RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 + RTM_NEWANYCAST = 0x3c RTM_NEWCACHEREPORT = 0x60 RTM_NEWCHAIN = 0x64 RTM_NEWLINK = 0x10 RTM_NEWLINKPROP = 0x6c RTM_NEWMDB = 0x54 + RTM_NEWMULTICAST = 0x38 RTM_NEWNDUSEROPT = 0x44 RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 @@ -2845,7 +2959,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2854,6 +2967,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f @@ -2886,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -2902,15 +3017,18 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 + RWF_DONTCACHE = 0x80 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0xff RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 SCHED_DEADLINE = 0x6 + SCHED_EXT = 0x7 SCHED_FIFO = 0x1 SCHED_FLAG_ALL = 0x7f SCHED_FLAG_DL_OVERRUN = 0x4 @@ -3179,11 +3297,13 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 
0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 STATX_CTIME = 0x80 STATX_DIOALIGN = 0x2000 + STATX_DIO_READ_ALIGN = 0x20000 STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 @@ -3192,8 +3312,10 @@ const ( STATX_MTIME = 0x40 STATX_NLINK = 0x4 STATX_SIZE = 0x200 + STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3233,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xe + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3303,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3414,6 +3534,7 @@ const ( TP_STATUS_WRONG_FORMAT = 0x4 TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 + UBI_IOCECNFO = 0xc01c6f06 UDF_SUPER_MAGIC = 0x15013346 UDP_CORK = 0x1 UDP_ENCAP = 0x64 @@ -3426,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3470,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3584,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 @@ -3592,6 +3712,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index e4bc0bd57..1c37f9fbc 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,12 +110,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -151,9 +157,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -230,6 +241,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + 
PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 PTRACE_GET_THREAD_AREA = 0x19 @@ -276,10 +301,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -314,6 +342,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -330,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -342,6 +374,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 689317afd..6f54d34ae 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,12 +110,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -151,9 +157,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -230,6 +241,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_ARCH_PRCTL = 0x1e PTRACE_GETFPREGS = 0xe PTRACE_GETFPXREGS = 0x12 @@ -277,10 +302,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 
0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -315,6 +343,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -331,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -343,6 +375,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 5cca668ac..783ec5c12 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +154,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETCRUNCHREGS = 0x19 PTRACE_GETFDPIC = 0x1f PTRACE_GETFDPIC_EXEC = 0x0 @@ -282,10 +307,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -320,6 +348,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -336,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + 
SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -348,6 +380,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 14270508b..ca83d3ba1 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 ESR_MAGIC = 0x45535201 EXTPROC = 0x10000 @@ -107,15 +110,19 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -152,9 +159,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -198,6 +210,7 @@ const ( PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + POE_MAGIC = 0x504f4530 PPPIOCATTACH = 0x4004743d PPPIOCATTCHAN = 0x40047438 PPPIOCBRIDGECHAN = 0x40047435 @@ -233,6 +246,20 @@ const ( PROT_BTI = 0x10 PROT_MTE = 0x20 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_PEEKMTETAGS = 0x21 PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f @@ -273,10 +300,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -311,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -327,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -339,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 
diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 28e39afdc..607e611c0 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -107,12 +110,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -152,9 +158,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -231,6 +242,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 @@ -269,10 +294,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -307,6 +335,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -323,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -335,6 +367,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index cd66e92cb..b9cb5bd3c 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 
0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +154,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +300,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c1595eba7..65b078a63 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ 
-148,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +300,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ee9456b0d..5298a3033 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e 
PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +300,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8cfca81e1..7bc557c87 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x100 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +154,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 
0x83403d0e PTRACE_GETFPREGS = 0xe PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 @@ -275,10 +300,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -313,6 +341,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -329,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 @@ -341,6 +373,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x1004 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x1006 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x1006 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 60b0deb3a..152399bb0 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -150,9 +156,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +241,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -330,10 +355,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -368,6 +396,9 @@ const ( SO_CNX_ADVICE = 0x35 
SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -384,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -396,6 +428,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f90aa7281..1a1ce2409 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -150,9 +156,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +241,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,10 +359,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -372,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -388,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -400,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff 
--git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ba9e01503..4231a1fb5 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x20 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000000 FF1 = 0x4000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x4000 ICANON = 0x100 IEXTEN = 0x400 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -150,9 +156,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 @@ -230,6 +241,20 @@ const ( PPPIOCXFERUNIT = 0x2000744e PROT_SAO = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETEVRREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETREGS64 = 0x16 @@ -334,10 +359,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -372,6 +400,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -388,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 @@ -400,6 +432,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x10 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x12 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x12 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 07cdfd6e9..21c0e9526 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 
0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_GETFDPIC = 0x21 PTRACE_GETFDPIC_EXEC = 0x0 PTRACE_GETFDPIC_INTERP = 0x1 @@ -266,10 +291,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -304,6 +332,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -320,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -332,6 +364,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 2f1dd214a..f00d1cd7c 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -78,6 +79,8 @@ const ( ECHOPRT = 0x400 EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 + EPIOCGPARAMS = 0x80088a02 + EPIOCSPARAMS = 0x40088a01 EPOLL_CLOEXEC = 0x80000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -106,12 +109,15 @@ const ( HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 + HIDIOCREVOKE = 0x4004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -148,9 +154,14 @@ 
const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 @@ -227,6 +238,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x80503d01 + PTP_CLOCK_GETCAPS2 = 0x80503d0a + PTP_ENABLE_PPS = 0x40043d04 + PTP_ENABLE_PPS2 = 0x40043d0d + PTP_EXTTS_REQUEST = 0x40103d02 + PTP_EXTTS_REQUEST2 = 0x40103d0b + PTP_MASK_CLEAR_ALL = 0x3d13 + PTP_MASK_EN_SINGLE = 0x40043d14 + PTP_PEROUT_REQUEST = 0x40383d03 + PTP_PEROUT_REQUEST2 = 0x40383d0c + PTP_PIN_SETFUNC = 0x40603d07 + PTP_PIN_SETFUNC2 = 0x40603d10 + PTP_SYS_OFFSET = 0x43403d05 + PTP_SYS_OFFSET2 = 0x43403d0e PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 PTRACE_GET_LAST_BREAK = 0x5006 @@ -338,10 +363,13 @@ const ( RTC_WIE_ON = 0x700f RTC_WKALM_RD = 0x80287010 RTC_WKALM_SET = 0x4028700f + SCM_DEVMEM_DMABUF = 0x4f + SCM_DEVMEM_LINEAR = 0x4e SCM_TIMESTAMPING = 0x25 SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 @@ -376,6 +404,9 @@ const ( SO_CNX_ADVICE = 0x35 SO_COOKIE = 0x39 SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DEVMEM_DMABUF = 0x4f + SO_DEVMEM_DONTNEED = 0x50 + SO_DEVMEM_LINEAR = 0x4e SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_ERROR = 0x4 @@ -392,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 @@ -404,6 +436,7 @@ const ( SO_RCVBUFFORCE = 0x21 SO_RCVLOWAT = 0x12 SO_RCVMARK = 0x4b + SO_RCVPRIORITY = 0x52 SO_RCVTIMEO = 0x14 SO_RCVTIMEO_NEW = 0x42 SO_RCVTIMEO_OLD = 0x14 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f40519d90..bc8d539e6 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -82,6 +83,8 @@ const ( EFD_CLOEXEC = 0x400000 EFD_NONBLOCK = 0x4000 EMT_TAGOVF = 0x1 + EPIOCGPARAMS = 0x40088a02 + EPIOCSPARAMS = 0x80088a01 EPOLL_CLOEXEC = 0x400000 EXTPROC = 0x10000 FF1 = 0x8000 @@ -110,12 +113,15 @@ const ( HIDIOCGRAWINFO = 0x40084803 HIDIOCGRDESC = 0x50044802 HIDIOCGRDESCSIZE = 0x40044801 + HIDIOCREVOKE = 0x8004480d HUPCL = 0x400 ICANON = 0x2 IEXTEN = 0x8000 IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -153,9 +159,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 @@ -232,6 +243,20 @@ const ( PPPIOCUNBRIDGECHAN = 0x20007434 PPPIOCXFERUNIT = 0x2000744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTP_CLOCK_GETCAPS = 0x40503d01 + 
PTP_CLOCK_GETCAPS2 = 0x40503d0a + PTP_ENABLE_PPS = 0x80043d04 + PTP_ENABLE_PPS2 = 0x80043d0d + PTP_EXTTS_REQUEST = 0x80103d02 + PTP_EXTTS_REQUEST2 = 0x80103d0b + PTP_MASK_CLEAR_ALL = 0x20003d13 + PTP_MASK_EN_SINGLE = 0x80043d14 + PTP_PEROUT_REQUEST = 0x80383d03 + PTP_PEROUT_REQUEST2 = 0x80383d0c + PTP_PIN_SETFUNC = 0x80603d07 + PTP_PIN_SETFUNC2 = 0x80603d10 + PTP_SYS_OFFSET = 0x83403d05 + PTP_SYS_OFFSET2 = 0x83403d0e PTRACE_GETFPAREGS = 0x14 PTRACE_GETFPREGS = 0xe PTRACE_GETFPREGS64 = 0x19 @@ -329,10 +354,13 @@ const ( RTC_WIE_ON = 0x2000700f RTC_WKALM_RD = 0x40287010 RTC_WKALM_SET = 0x8028700f + SCM_DEVMEM_DMABUF = 0x58 + SCM_DEVMEM_LINEAR = 0x57 SCM_TIMESTAMPING = 0x23 SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 @@ -415,6 +443,9 @@ const ( SO_CNX_ADVICE = 0x37 SO_COOKIE = 0x3b SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DEVMEM_DMABUF = 0x58 + SO_DEVMEM_DONTNEED = 0x59 + SO_DEVMEM_LINEAR = 0x57 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 @@ -431,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 @@ -443,6 +475,7 @@ const ( SO_RCVBUFFORCE = 0x100b SO_RCVLOWAT = 0x800 SO_RCVMARK = 0x54 + SO_RCVPRIORITY = 0x5b SO_RCVTIMEO = 0x2000 SO_RCVTIMEO_NEW = 0x44 SO_RCVTIMEO_OLD = 0x2000 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3..1ec2b1407 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240..813c05b66 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic 
libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -2411,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb2840..fda328582 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 @@ -713,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT 
libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b..e6f58f3c6 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -740,6 +740,54 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func renamexNp(from string, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_renamex_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renamex_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renamex_np renamex_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func renameatxNp(fromfd int, from string, tofd int, to string, flag uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_renameatx_np_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), uintptr(flag), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_renameatx_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_renameatx_np renameatx_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -760,6 +808,59 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := 
syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -2411,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab..7f8998b90 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -223,11 +223,36 @@ TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) +TEXT libc_renamex_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renamex_np(SB) +GLOBL ·libc_renamex_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renamex_np_trampoline_addr(SB)/8, $libc_renamex_np_trampoline<>(SB) + +TEXT libc_renameatx_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_renameatx_np(SB) +GLOBL ·libc_renameatx_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_renameatx_np_trampoline_addr(SB)/8, $libc_renameatx_np_trampoline<>(SB) + TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 @@ -713,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_linux.go 
b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 87d8612a1..5cc1e8eb2 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockSettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) if e1 != 0 { @@ -971,23 +981,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -2229,3 +2222,19 @@ func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mseal(b []byte, flags uint) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSEAL, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9dc42410b..1851df14e 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 41b561731..0b43c6936 100644 --- 
a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0d3a0751c..e1ec0dbe4 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 4019a656f..880c6d6e3 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index c39f7776d..7c8452a63 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index ac4af24f9..b8ef95b0f 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4 DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $4 +DATA ·libc_mount_trampoline_addr(SB)/4, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 57571d072..2ffdf861f 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index f77d53212..2af3b5c76 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go 
b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index e62963e67..1da08d526 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index fae140b62..b7a251353 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 00831354c..6e85b0aac 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 9d1e0ff06..f15dadf05 100644 --- 
a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -555,6 +555,12 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_mount(SB) + RET +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_nanosleep(SB) RET diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 79029ed58..28b487df2 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -1493,6 +1493,30 @@ var libc_mknodat_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(fsType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(dir) + if err != nil { + return + } + _, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_mount_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_mount mount "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) if e1 != 0 { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index da115f9a4..1e7f321e4 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -463,6 +463,11 @@ TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB) +TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_mount(SB) +GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 +DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) + TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_nanosleep(SB) GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb..b4609c20c 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -72,7 +72,7 @@ import ( //go:cgo_import_dynamic libc_kill kill "libc.so" //go:cgo_import_dynamic libc_lchown lchown "libc.so" //go:cgo_import_dynamic libc_link link "libc.so" -//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so" +//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so" //go:cgo_import_dynamic libc_lstat lstat "libc.so" //go:cgo_import_dynamic libc_madvise madvise "libc.so" //go:cgo_import_dynamic libc_mkdir mkdir "libc.so" @@ -141,6 
+141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -211,7 +221,7 @@ import ( //go:linkname procKill libc_kill //go:linkname procLchown libc_lchown //go:linkname procLink libc_link -//go:linkname proc__xnet_llisten libc___xnet_llisten +//go:linkname proc__xnet_listen libc___xnet_listen //go:linkname procLstat libc_lstat //go:linkname procMadvise libc_madvise //go:linkname procMkdir libc_mkdir @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -351,7 +371,7 @@ var ( procKill, procLchown, procLink, - proc__xnet_llisten, + proc__xnet_listen, procLstat, procMadvise, procMkdir, @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -1148,7 +1178,7 @@ func Link(path string, link string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 53aef5dc5..aca56ee49 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -457,4 +457,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 71d524763..2ea1ef58c 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 @@ -379,4 +380,10 @@ const ( 
SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c74770613..d22c8af31 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -421,4 +421,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index f96e214f6..5ee264ae9 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -324,4 +324,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 28425346c..f9f03ebf5 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 @@ -318,4 +320,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index d0953018d..87c2118e8 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -441,4 +441,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 295c7f4b8..391ad102f 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -371,4 +371,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 
d1a9eaca7..565615775 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -371,4 +371,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 5459 SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 + SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index bec157c39..0482b52e3 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -441,4 +441,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 4459 SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 + SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 7ee7bdc43..71806f08f 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -448,4 +448,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index fad1f25b4..e35a71058 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -420,4 +420,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 7d3e16357..2aea47670 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -420,4 +420,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 0ed53ad9f..6c9bb4e56 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 @@ -325,4 +325,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go 
b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 2fba04ad5..680bc9915 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -386,4 +386,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 621d00d74..620f27105 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -399,4 +399,10 @@ const ( SYS_LSM_GET_SELF_ATTR = 459 SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 + SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3..17c53bd9b 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74..2392226a7 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID 
uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 @@ -449,11 +462,14 @@ type FdSet struct { const ( SizeofIfMsghdr = 0x70 + SizeofIfMsghdr2 = 0xa0 SizeofIfData = 0x60 + SizeofIfData64 = 0x80 SizeofIfaMsghdr = 0x14 SizeofIfmaMsghdr = 0x10 SizeofIfmaMsghdr2 = 0x14 SizeofRtMsghdr = 0x5c + SizeofRtMsghdr2 = 0x5c SizeofRtMetrics = 0x38 ) @@ -467,6 +483,20 @@ type IfMsghdr struct { Data IfData } +type IfMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + Snd_len int32 + Snd_maxlen int32 + Snd_drops int32 + Timer int32 + Data IfData64 +} + type IfData struct { Type uint8 Typelen uint8 @@ -499,6 +529,34 @@ type IfData struct { Reserved2 uint32 } +type IfData64 struct { + Type uint8 + Typelen uint8 + Physical uint8 + Addrlen uint8 + Hdrlen uint8 + Recvquota uint8 + Xmitquota uint8 + Unused1 uint8 + Mtu uint32 + Metric uint32 + Baudrate uint64 + Ipackets uint64 + Ierrors uint64 + Opackets uint64 + Oerrors uint64 + Collisions uint64 + Ibytes uint64 + Obytes uint64 + Imcasts uint64 + Omcasts uint64 + Iqdrops uint64 + Noproto uint64 + Recvtiming uint32 + Xmittiming uint32 + Lastchange Timeval32 +} + type IfaMsghdr struct { Msglen uint16 Version uint8 @@ -544,6 +602,21 @@ type RtMsghdr struct { Rmx RtMetrics } +type RtMsghdr2 struct { + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + Flags int32 + Addrs int32 + Refcnt int32 + Parentflags int32 + Reserved int32 + Use int32 + Inits uint32 + Rmx RtMetrics +} + type RtMetrics struct { Locks uint32 Mtu uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3..51e13eb05 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee7..d002d8ef3 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8..3f863d898 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acf..61c729310 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go 
b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c790..b5d17414f 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux.go index 4740b8348..944e75a11 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,30 +87,37 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - _ [12]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + Dio_read_offset_align uint32 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -194,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 + _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -515,6 +523,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -556,6 +587,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -600,6 +632,8 @@ const ( IFA_FLAGS = 0x8 IFA_RT_PRIORITY = 0x9 IFA_TARGET_NETNSID = 0xa + IFAL_LABEL = 0x2 + IFAL_ADDRESS = 0x1 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -657,6 +691,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfAddrlblmsg = 0xc SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 @@ -708,6 +743,15 @@ type IfAddrmsg struct { Index uint32 } +type IfAddrlblmsg struct { + Family uint8 + _ uint8 + Prefixlen uint8 + Flags uint8 + Index uint32 + Seq uint32 +} + type IfaCacheinfo struct { Prefered uint32 Valid uint32 @@ -1723,12 +1767,6 @@ const ( IFLA_IPVLAN_UNSPEC = 0x0 IFLA_IPVLAN_MODE = 0x1 IFLA_IPVLAN_FLAGS = 0x2 - NETKIT_NEXT = -0x1 - NETKIT_PASS = 0x0 - NETKIT_DROP = 0x2 - NETKIT_REDIRECT = 0x7 - NETKIT_L2 = 0x0 - NETKIT_L3 = 0x1 IFLA_NETKIT_UNSPEC = 0x0 IFLA_NETKIT_PEER_INFO = 0x1 IFLA_NETKIT_PRIMARY = 0x2 @@ -1767,6 +1805,7 @@ 
const ( IFLA_VXLAN_DF = 0x1d IFLA_VXLAN_VNIFILTER = 0x1e IFLA_VXLAN_LOCALBYPASS = 0x1f + IFLA_VXLAN_LABEL_POLICY = 0x20 IFLA_GENEVE_UNSPEC = 0x0 IFLA_GENEVE_ID = 0x1 IFLA_GENEVE_REMOTE = 0x2 @@ -1796,6 +1835,8 @@ const ( IFLA_GTP_ROLE = 0x4 IFLA_GTP_CREATE_SOCKETS = 0x5 IFLA_GTP_RESTART_COUNT = 0x6 + IFLA_GTP_LOCAL = 0x7 + IFLA_GTP_LOCAL6 = 0x8 IFLA_BOND_UNSPEC = 0x0 IFLA_BOND_MODE = 0x1 IFLA_BOND_ACTIVE_SLAVE = 0x2 @@ -1828,6 +1869,7 @@ const ( IFLA_BOND_AD_LACP_ACTIVE = 0x1d IFLA_BOND_MISSED_MAX = 0x1e IFLA_BOND_NS_IP6_TARGET = 0x1f + IFLA_BOND_COUPLED_CONTROL = 0x20 IFLA_BOND_AD_INFO_UNSPEC = 0x0 IFLA_BOND_AD_INFO_AGGREGATOR = 0x1 IFLA_BOND_AD_INFO_NUM_PORTS = 0x2 @@ -1896,6 +1938,7 @@ const ( IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 IFLA_HSR_PROTOCOL = 0x7 + IFLA_HSR_INTERLINK = 0x8 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -1948,6 +1991,15 @@ const ( IFLA_DSA_MASTER = 0x1 ) +const ( + NETKIT_NEXT = -0x1 + NETKIT_PASS = 0x0 + NETKIT_DROP = 0x2 + NETKIT_REDIRECT = 0x7 + NETKIT_L2 = 0x0 + NETKIT_L3 = 0x1 +) + const ( NF_INET_PRE_ROUTING = 0x0 NF_INET_LOCAL_IN = 0x1 @@ -2189,8 +2241,11 @@ const ( NFT_PAYLOAD_LL_HEADER = 0x0 NFT_PAYLOAD_NETWORK_HEADER = 0x1 NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_INNER_HEADER = 0x3 + NFT_PAYLOAD_TUN_HEADER = 0x4 NFT_PAYLOAD_CSUM_NONE = 0x0 NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_CSUM_SCTP = 0x2 NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 NFTA_PAYLOAD_UNSPEC = 0x0 NFTA_PAYLOAD_DREG = 0x1 @@ -2277,6 +2332,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2485,7 +2545,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -2557,8 +2617,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x10000 - SOF_TIMESTAMPING_MASK = 0x1ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3004,6 +3064,23 @@ const ( ) const ( + TCA_UNSPEC = 0x0 + TCA_KIND = 0x1 + TCA_OPTIONS = 0x2 + TCA_STATS = 0x3 + TCA_XSTATS = 0x4 + TCA_RATE = 0x5 + TCA_FCNT = 0x6 + TCA_STATS2 = 0x7 + TCA_STAB = 0x8 + TCA_PAD = 0x9 + TCA_DUMP_INVISIBLE = 0xa + TCA_CHAIN = 0xb + TCA_HW_OFFLOAD = 0xc + TCA_INGRESS_BLOCK = 0xd + TCA_EGRESS_BLOCK = 0xe + TCA_DUMP_FLAGS = 0xf + TCA_EXT_WARN_MSG = 0x10 RTNLGRP_NONE = 0x0 RTNLGRP_LINK = 0x1 RTNLGRP_NOTIFY = 0x2 @@ -3038,6 +3115,18 @@ const ( RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 RTNLGRP_BRVLAN = 0x21 + RTNLGRP_MCTP_IFADDR = 0x22 + RTNLGRP_TUNNEL = 0x23 + RTNLGRP_STATS = 0x24 + RTNLGRP_IPV4_MCADDR = 0x25 + RTNLGRP_IPV6_MCADDR = 0x26 + RTNLGRP_IPV6_ACADDR = 0x27 + TCA_ROOT_UNSPEC = 0x0 + TCA_ROOT_TAB = 0x1 + TCA_ROOT_FLAGS = 0x2 + TCA_ROOT_COUNT = 0x3 + TCA_ROOT_TIME_DELTA = 0x4 + TCA_ROOT_EXT_WARN_MSG = 0x5 ) type CapUserHeader struct { @@ -3473,7 +3562,7 @@ const ( DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 DEVLINK_PORT_FN_ATTR_CAPS = 0x4 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x5 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x6 ) type FsverityDigest struct { @@ -3504,7 +3593,7 @@ type Nhmsg struct { type NexthopGrp struct { Id uint32 Weight uint8 - Resvd1 uint8 + High uint8 Resvd2 uint16 } @@ -3765,7 +3854,16 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 
ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_PLCA_GET_CFG = 0x27 + ETHTOOL_MSG_PLCA_SET_CFG = 0x28 + ETHTOOL_MSG_PLCA_GET_STATUS = 0x29 + ETHTOOL_MSG_MM_GET = 0x2a + ETHTOOL_MSG_MM_SET = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c + ETHTOOL_MSG_PHY_GET = 0x2d + ETHTOOL_MSG_TSCONFIG_GET = 0x2e + ETHTOOL_MSG_TSCONFIG_SET = 0x2f + ETHTOOL_MSG_USER_MAX = 0x2f ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3805,12 +3903,25 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27 + ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28 + ETHTOOL_MSG_PLCA_NTF = 0x29 + ETHTOOL_MSG_MM_GET_REPLY = 0x2a + ETHTOOL_MSG_MM_NTF = 0x2b + ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c + ETHTOOL_MSG_PHY_GET_REPLY = 0x2d + ETHTOOL_MSG_PHY_NTF = 0x2e + ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f + ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30 + ETHTOOL_MSG_KERNEL_MAX = 0x30 + ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 + ETHTOOL_FLAG_OMIT_REPLY = 0x2 + ETHTOOL_FLAG_STATS = 0x4 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 ETHTOOL_A_HEADER_FLAGS = 0x3 - ETHTOOL_A_HEADER_MAX = 0x3 + ETHTOOL_A_HEADER_MAX = 0x4 ETHTOOL_A_BITSET_BIT_UNSPEC = 0x0 ETHTOOL_A_BITSET_BIT_INDEX = 0x1 ETHTOOL_A_BITSET_BIT_NAME = 0x2 @@ -3909,7 +4020,12 @@ const ( ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb ETHTOOL_A_RINGS_CQE_SIZE = 0xc ETHTOOL_A_RINGS_TX_PUSH = 0xd - ETHTOOL_A_RINGS_MAX = 0x10 + ETHTOOL_A_RINGS_RX_PUSH = 0xe + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf + ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10 + ETHTOOL_A_RINGS_HDS_THRESH = 0x11 + ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12 + ETHTOOL_A_RINGS_MAX = 0x12 ETHTOOL_A_CHANNELS_UNSPEC = 0x0 ETHTOOL_A_CHANNELS_HEADER = 0x1 ETHTOOL_A_CHANNELS_RX_MAX = 0x2 @@ -3947,7 +4063,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -3975,7 +4091,9 @@ const ( ETHTOOL_A_TSINFO_TX_TYPES = 0x3 ETHTOOL_A_TSINFO_RX_FILTERS = 0x4 ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 - ETHTOOL_A_TSINFO_MAX = 0x5 + ETHTOOL_A_TSINFO_STATS = 0x6 + ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -3991,11 +4109,11 @@ const ( ETHTOOL_A_CABLE_RESULT_UNSPEC = 0x0 ETHTOOL_A_CABLE_RESULT_PAIR = 0x1 ETHTOOL_A_CABLE_RESULT_CODE = 0x2 - ETHTOOL_A_CABLE_RESULT_MAX = 0x2 + ETHTOOL_A_CABLE_RESULT_MAX = 0x3 ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC = 0x0 ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR = 0x1 ETHTOOL_A_CABLE_FAULT_LENGTH_CM = 0x2 - ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x2 + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = 0x3 ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED = 0x1 ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED = 0x2 @@ -4061,6 +4179,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4078,6 +4209,107 @@ type EthtoolDrvinfo struct { Regdump_len uint32 } 
+type EthtoolTsInfo struct { + Cmd uint32 + So_timestamping uint32 + Phc_index int32 + Tx_types uint32 + Tx_reserved [3]uint32 + Rx_filters uint32 + Rx_reserved [3]uint32 +} + +type HwTstampConfig struct { + Flags int32 + Tx_type int32 + Rx_filter int32 +} + +const ( + HWTSTAMP_FILTER_NONE = 0x0 + HWTSTAMP_FILTER_ALL = 0x1 + HWTSTAMP_FILTER_SOME = 0x2 + HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3 + HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6 + HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9 + HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc +) + +const ( + HWTSTAMP_TX_OFF = 0x0 + HWTSTAMP_TX_ON = 0x1 + HWTSTAMP_TX_ONESTEP_SYNC = 0x2 +) + +type ( + PtpClockCaps struct { + Max_adj int32 + N_alarm int32 + N_ext_ts int32 + N_per_out int32 + Pps int32 + N_pins int32 + Cross_timestamping int32 + Adjust_phase int32 + Max_phase_adj int32 + Rsv [11]int32 + } + PtpClockTime struct { + Sec int64 + Nsec uint32 + Reserved uint32 + } + PtpExttsEvent struct { + T PtpClockTime + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpExttsRequest struct { + Index uint32 + Flags uint32 + Rsv [2]uint32 + } + PtpPeroutRequest struct { + StartOrPhase PtpClockTime + Period PtpClockTime + Index uint32 + Flags uint32 + On PtpClockTime + } + PtpPinDesc struct { + Name [64]byte + Index uint32 + Func uint32 + Chan uint32 + Rsv [5]uint32 + } + PtpSysOffset struct { + Samples uint32 + Rsv [3]uint32 + Ts [51]PtpClockTime + } + PtpSysOffsetExtended struct { + Samples uint32 + Clockid int32 + Rsv [2]uint32 + Ts [25][3]PtpClockTime + } + PtpSysOffsetPrecise struct { + Device PtpClockTime + Realtime PtpClockTime + Monoraw PtpClockTime + Rsv [4]uint32 + } +) + +const ( + PTP_PF_NONE = 0x0 + PTP_PF_EXTTS = 0x1 + PTP_PF_PEROUT = 0x2 + PTP_PF_PHYSYNC = 0x3 +) + type ( HIDRawReportDescriptor struct { Size uint32 @@ -4259,6 +4491,7 @@ const ( type LandlockRulesetAttr struct { Access_fs uint64 Access_net uint64 + Scoped uint64 } type LandlockPathBeneathAttr struct { @@ -4471,6 +4704,7 @@ const ( NL80211_ATTR_AKM_SUITES = 0x4c NL80211_ATTR_AP_ISOLATE = 0x60 NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135 + NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a NL80211_ATTR_AUTH_DATA = 0x9c NL80211_ATTR_AUTH_TYPE = 0x35 NL80211_ATTR_BANDS = 0xef @@ -4481,6 +4715,7 @@ const ( NL80211_ATTR_BSS_BASIC_RATES = 0x24 NL80211_ATTR_BSS = 0x2f NL80211_ATTR_BSS_CTS_PROT = 0x1c + NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147 NL80211_ATTR_BSS_HT_OPMODE = 0x6d NL80211_ATTR_BSSID = 0xf5 NL80211_ATTR_BSS_SELECT = 0xe3 @@ -4540,6 +4775,7 @@ const ( NL80211_ATTR_DTIM_PERIOD = 0xd NL80211_ATTR_DURATION = 0x57 NL80211_ATTR_EHT_CAPABILITY = 0x136 + NL80211_ATTR_EMA_RNR_ELEMS = 0x145 NL80211_ATTR_EML_CAPABILITY = 0x13d NL80211_ATTR_EXT_CAPA = 0xa9 NL80211_ATTR_EXT_CAPA_MASK = 0xaa @@ -4575,6 +4811,7 @@ const ( NL80211_ATTR_HIDDEN_SSID = 0x7e NL80211_ATTR_HT_CAPABILITY = 0x1f NL80211_ATTR_HT_CAPABILITY_MASK = 0x94 + NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144 NL80211_ATTR_IE_ASSOC_RESP = 0x80 NL80211_ATTR_IE = 0x2a NL80211_ATTR_IE_PROBE_RESP = 0x7f @@ -4605,9 +4842,10 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce + NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 NL80211_ATTR_MAX_MATCH_SETS = 0x85 NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c NL80211_ATTR_MAX_NUM_PMKIDS = 0x56 @@ -4632,9 +4870,12 @@ const ( NL80211_ATTR_MGMT_SUBTYPE = 0x29 NL80211_ATTR_MLD_ADDR = 0x13a NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e + NL80211_ATTR_MLO_LINK_DISABLED 
= 0x146 NL80211_ATTR_MLO_LINK_ID = 0x139 NL80211_ATTR_MLO_LINKS = 0x138 NL80211_ATTR_MLO_SUPPORT = 0x13b + NL80211_ATTR_MLO_TTLM_DLINK = 0x148 + NL80211_ATTR_MLO_TTLM_ULINK = 0x149 NL80211_ATTR_MNTR_FLAGS = 0x17 NL80211_ATTR_MPATH_INFO = 0x1b NL80211_ATTR_MPATH_NEXT_HOP = 0x1a @@ -4667,12 +4908,14 @@ const ( NL80211_ATTR_PORT_AUTHORIZED = 0x103 NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5 NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6 + NL80211_ATTR_POWER_RULE_PSD = 0x8 NL80211_ATTR_PREV_BSSID = 0x4f NL80211_ATTR_PRIVACY = 0x46 NL80211_ATTR_PROBE_RESP = 0x91 NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90 NL80211_ATTR_PROTOCOL_FEATURES = 0xad NL80211_ATTR_PS_STATE = 0x5d + NL80211_ATTR_PUNCT_BITMAP = 0x142 NL80211_ATTR_QOS_MAP = 0xc7 NL80211_ATTR_RADAR_BACKGROUND = 0x134 NL80211_ATTR_RADAR_EVENT = 0xa8 @@ -4801,7 +5044,9 @@ const ( NL80211_ATTR_WIPHY_FREQ = 0x26 NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9 NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122 + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c NL80211_ATTR_WIPHY_NAME = 0x2 + NL80211_ATTR_WIPHY_RADIOS = 0x14b NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40 @@ -4836,6 +5081,8 @@ const ( NL80211_BAND_ATTR_IFTYPE_DATA = 0x9 NL80211_BAND_ATTR_MAX = 0xd NL80211_BAND_ATTR_RATES = 0x2 + NL80211_BAND_ATTR_S1G_CAPA = 0xd + NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc NL80211_BAND_ATTR_VHT_CAPA = 0x8 NL80211_BAND_ATTR_VHT_MCS_SET = 0x7 NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8 @@ -4859,6 +5106,10 @@ const ( NL80211_BSS_BEACON_INTERVAL = 0x4 NL80211_BSS_BEACON_TSF = 0xd NL80211_BSS_BSSID = 0x1 + NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2 + NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1 + NL80211_BSS_CANNOT_USE_REASONS = 0x18 + NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2 NL80211_BSS_CAPABILITY = 0x5 NL80211_BSS_CHAIN_SIGNAL = 0x13 NL80211_BSS_CHAN_WIDTH_10 = 0x1 @@ -4890,6 +5141,9 @@ const ( NL80211_BSS_STATUS = 0x9 NL80211_BSS_STATUS_IBSS_JOINED = 0x2 NL80211_BSS_TSF = 0x3 + NL80211_BSS_USE_FOR = 0x17 + NL80211_BSS_USE_FOR_MLD_LINK = 0x2 + NL80211_BSS_USE_FOR_NORMAL = 0x1 NL80211_CHAN_HT20 = 0x1 NL80211_CHAN_HT40MINUS = 0x2 NL80211_CHAN_HT40PLUS = 0x3 @@ -4975,7 +5229,8 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9b + NL80211_CMD_LINKS_REMOVED = 0x9a + NL80211_CMD_MAX = 0x9d NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5019,6 +5274,7 @@ const ( NL80211_CMD_SET_COALESCE = 0x65 NL80211_CMD_SET_CQM = 0x3f NL80211_CMD_SET_FILS_AAD = 0x92 + NL80211_CMD_SET_HW_TIMESTAMP = 0x99 NL80211_CMD_SET_INTERFACE = 0x6 NL80211_CMD_SET_KEY = 0xa NL80211_CMD_SET_MAC_ACL = 0x5d @@ -5038,6 +5294,7 @@ const ( NL80211_CMD_SET_SAR_SPECS = 0x8c NL80211_CMD_SET_STATION = 0x12 NL80211_CMD_SET_TID_CONFIG = 0x89 + NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b NL80211_CMD_SET_TX_BITRATE_MASK = 0x39 NL80211_CMD_SET_WDS_PEER = 0x42 NL80211_CMD_SET_WIPHY = 0x2 @@ -5105,6 +5362,7 @@ const ( NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21 NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22 NL80211_EXT_FEATURE_AQL = 0x28 + NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29 NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36 @@ -5120,6 +5378,7 @@ const ( NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c + NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43 
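Note: most of this file is regenerated nl80211 constants. As an orientation aid only, a hedged sketch of how the new BSS "use for" / "cannot use" bitmasks could be interpreted once the corresponding NL80211_BSS_* attributes have been parsed from a scan dump; the netlink parsing itself is out of scope, so literal placeholder values stand in.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// describeBSSUse decodes the new NL80211_BSS_USE_FOR and
// NL80211_BSS_CANNOT_USE_REASONS bitmask values.
func describeBSSUse(useFor, cannotUse uint32) {
	if useFor&unix.NL80211_BSS_USE_FOR_NORMAL != 0 {
		fmt.Println("usable for normal connections")
	}
	if useFor&unix.NL80211_BSS_USE_FOR_MLD_LINK != 0 {
		fmt.Println("usable as an MLO link")
	}
	if cannotUse&unix.NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY != 0 {
		fmt.Println("blocked: NSTR non-primary link")
	}
	if cannotUse&unix.NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH != 0 {
		fmt.Println("blocked: 6 GHz power mismatch")
	}
}

func main() {
	// Placeholder values; real ones come from nl80211 scan results.
	describeBSSUse(unix.NL80211_BSS_USE_FOR_NORMAL|unix.NL80211_BSS_USE_FOR_MLD_LINK, 0)
}
```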
NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20 NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24 @@ -5139,9 +5398,12 @@ const ( NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13 NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31 + NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42 + NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41 NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39 + NL80211_EXT_FEATURE_PUNCT = 0x3e NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c NL80211_EXT_FEATURE_RRM = 0x1 NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33 @@ -5153,8 +5415,10 @@ const ( NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23 NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc NL80211_EXT_FEATURE_SECURE_LTF = 0x37 + NL80211_EXT_FEATURE_SECURE_NAN = 0x3f NL80211_EXT_FEATURE_SECURE_RTT = 0x38 NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5 + NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44 NL80211_EXT_FEATURE_STA_TX_PWR = 0x25 NL80211_EXT_FEATURE_TXQS = 0x1c NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35 @@ -5201,7 +5465,10 @@ const ( NL80211_FREQUENCY_ATTR_2MHZ = 0x16 NL80211_FREQUENCY_ATTR_4MHZ = 0x17 NL80211_FREQUENCY_ATTR_8MHZ = 0x18 + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21 + NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20 NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd + NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7 NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8 NL80211_FREQUENCY_ATTR_DISABLED = 0x2 @@ -5209,12 +5476,14 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10 NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a + NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b NL80211_FREQUENCY_ATTR_NO_HE = 0x13 @@ -5222,8 +5491,11 @@ const ( NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3 NL80211_FREQUENCY_ATTR_NO_IR = 0x3 + NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f + NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e NL80211_FREQUENCY_ATTR_OFFSET = 0x14 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3 + NL80211_FREQUENCY_ATTR_PSD = 0x1c NL80211_FREQUENCY_ATTR_RADAR = 0x5 NL80211_FREQUENCY_ATTR_WMM = 0x12 NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3 @@ -5288,6 +5560,7 @@ const ( NL80211_IFTYPE_STATION = 0x2 NL80211_IFTYPE_UNSPECIFIED = 0x0 NL80211_IFTYPE_WDS = 0x5 + NL80211_KCK_EXT_LEN_32 = 0x20 NL80211_KCK_EXT_LEN = 0x18 NL80211_KCK_LEN = 0x10 NL80211_KEK_EXT_LEN = 0x20 @@ -5316,9 +5589,10 @@ const ( NL80211_MAX_SUPP_HT_RATES = 0x4d NL80211_MAX_SUPP_RATES = 0x20 NL80211_MAX_SUPP_REG_RULES = 0x80 + NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 @@ -5377,7 +5651,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - 
NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -5561,11 +5835,16 @@ const ( NL80211_RADAR_PRE_CAC_EXPIRED = 0x4 NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa + NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d + NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19 + NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12 NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3 + NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8 NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9 + NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c NL80211_RATE_INFO_BITRATE32 = 0x5 NL80211_RATE_INFO_BITRATE = 0x1 NL80211_RATE_INFO_EHT_GI_0_8 = 0x0 @@ -5611,6 +5890,8 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 + NL80211_RATE_INFO_S1G_MCS = 0x17 + NL80211_RATE_INFO_S1G_NSS = 0x18 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 NL80211_RATE_INFO_VHT_NSS = 0x7 @@ -5628,14 +5909,19 @@ const ( NL80211_REKEY_DATA_KEK = 0x1 NL80211_REKEY_DATA_REPLAY_CTR = 0x3 NL80211_REPLAY_CTR_LEN = 0x8 + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000 NL80211_RRF_AUTO_BW = 0x800 NL80211_RRF_DFS = 0x10 + NL80211_RRF_DFS_CONCURRENT = 0x200000 NL80211_RRF_GO_CONCURRENT = 0x1000 NL80211_RRF_IR_CONCURRENT = 0x1000 NL80211_RRF_NO_160MHZ = 0x10000 NL80211_RRF_NO_320MHZ = 0x40000 + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000 NL80211_RRF_NO_80MHZ = 0x8000 NL80211_RRF_NO_CCK = 0x2 + NL80211_RRF_NO_EHT = 0x80000 NL80211_RRF_NO_HE = 0x20000 NL80211_RRF_NO_HT40 = 0x6000 NL80211_RRF_NO_HT40MINUS = 0x2000 @@ -5646,7 +5932,10 @@ const ( NL80211_RRF_NO_IR = 0x80 NL80211_RRF_NO_OFDM = 0x1 NL80211_RRF_NO_OUTDOOR = 0x8 + NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000 + NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000 NL80211_RRF_PASSIVE_SCAN = 0x80 + NL80211_RRF_PSD = 0x100000 NL80211_RRF_PTMP_ONLY = 0x40 NL80211_RRF_PTP_ONLY = 0x20 NL80211_RXMGMT_FLAG_ANSWERED = 0x1 @@ -5707,6 +5996,7 @@ const ( NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 + NL80211_STA_FLAG_SPP_AMSDU = 0x8 NL80211_STA_FLAG_TDLS_PEER = 0x6 NL80211_STA_FLAG_WME = 0x3 NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23 @@ -5865,6 +6155,13 @@ const ( NL80211_VHT_CAPABILITY_LEN = 0xc NL80211_VHT_NSS_MAX = 0x8 NL80211_WIPHY_NAME_MAXLEN = 0x40 + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2 + NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1 + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3 + NL80211_WIPHY_RADIO_ATTR_MAX = 0x4 + NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2 + NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1 NL80211_WMMR_AIFSN = 0x3 NL80211_WMMR_CW_MAX = 0x2 NL80211_WMMR_CW_MIN = 0x1 @@ -5896,6 +6193,7 @@ const ( NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4 NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9 NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe + NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14 NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc @@ -6032,3 +6330,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fd402da43..485f2d3a1 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ 
b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,7 +282,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -338,6 +338,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index eb7a5e186..ecbd1ad8b 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -351,6 +351,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index d78ac108b..02f0463a4 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,7 +273,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -329,6 +329,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cd06d47f1..6f4d400d2 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -330,6 +330,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + 
Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2f28fe26c..cd532cfa5 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -331,6 +331,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 71d6cac2f..413362085 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 8596d4535..eaa37eb71 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index cd60ea186..98ae6a1e4 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -333,6 +333,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 
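Note: every ztypes_linux_*.go file in this range grows the same sixteen Taskstats fields (per-resource delay minima and maxima) plus an adjusted padding field on several 32-bit targets, bringing the struct in line with the newer kernel taskstats layout. A small sketch of reading the new fields; populating a Taskstats normally requires a taskstats genetlink query, which is not shown, so a zero value stands in.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// summarize reports the CPU delay-accounting figures, including the new
// min/max extremes, from an already-filled Taskstats record.
func summarize(ts *unix.Taskstats) string {
	return fmt.Sprintf("cpu delays: total=%dns min=%dns max=%dns over %d events",
		ts.Cpu_delay_total, ts.Cpu_delay_min, ts.Cpu_delay_max, ts.Cpu_count)
}

func main() {
	var ts unix.Taskstats // placeholder; real values come from the taskstats netlink family
	fmt.Println(summarize(&ts))
}
```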
+ Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index b0ae420c4..cae196159 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,7 +278,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -334,6 +334,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 835972875..6ce3b4e02 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,7 +285,7 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 Blkio_count uint64 @@ -341,6 +341,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 69eb6a5c6..c7429c6a1 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -340,6 +340,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 5f583cb62..4bf4baf4c 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -340,6 +340,22 @@ type Taskstats struct { 
Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc0414..e9709d70a 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -358,6 +358,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 @@ -727,6 +743,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +781,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index cf3ce9003..fb44268ca 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -353,6 +353,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + 
Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 590b56739..9c38265c7 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -335,6 +335,22 @@ type Taskstats struct { Wpcopy_delay_total uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 + Irq_delay_max uint64 + Irq_delay_min uint64 } type cpuMask uint64 diff --git a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index d9a13af46..2e5d5a443 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/src/nvcgo/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -377,6 +377,12 @@ type Flock_t struct { Pid int32 } +type F_cnvrt struct { + Cvtcmd int32 + Pccsid int16 + Fccsid int16 +} + type Termios struct { Cflag uint32 Iflag uint32 diff --git a/src/nvcgo/vendor/golang.org/x/sys/windows/dll_windows.go b/src/nvcgo/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fba..3ca814f54 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/src/nvcgo/vendor/golang.org/x/sys/windows/dll_windows.go @@ -43,8 +43,8 @@ type DLL struct { // LoadDLL loads DLL file into memory. // // Warning: using LoadDLL without an absolute path name is subject to -// DLL preloading attacks. To safely load a system DLL, use LazyDLL -// with System set to true, or use LoadLibraryEx directly. +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL], +// or use [LoadLibraryEx] directly. func LoadDLL(name string) (dll *DLL, err error) { namep, err := UTF16PtrFromString(name) if err != nil { @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { @@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc { } // NewLazyDLL creates new LazyDLL associated with DLL file. +// +// Warning: using NewLazyDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL]. 
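Note: the documentation updates above steer callers away from LoadDLL/NewLazyDLL with bare file names because of DLL preloading attacks. A minimal example of the recommended pattern, resolving a kernel32 export through NewLazySystemDLL (GetCurrentProcessId is just a convenient, side-effect-free export to call).

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// Resolve an export from a system DLL the safe way: NewLazySystemDLL only
// searches the Windows system directory, so a kernel32.dll planted in the
// working directory cannot be picked up.
func main() {
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	proc := kernel32.NewProc("GetCurrentProcessId")

	pid, _, _ := proc.Call() // no arguments; returns the caller's process id
	fmt.Println("pid:", pid)
}
```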
func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } @@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { } return &DLL{Name: name, Handle: h}, nil } - -type errString string - -func (s errString) Error() string { return string(s) } diff --git a/src/nvcgo/vendor/golang.org/x/sys/windows/security_windows.go b/src/nvcgo/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70..a8b0364c7 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/windows/security_windows.go +++ b/src/nvcgo/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace +type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { @@ -1281,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE return nil, err } if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + absoluteSD = new(SECURITY_DESCRIPTOR) + if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) { + panic("sizeof(SECURITY_DESCRIPTOR) too small") + } } var ( dacl *ACL @@ -1290,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE group *SID ) if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize)))) } if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize)))) } if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize)))) } if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize)))) } + // We call into Windows via makeAbsoluteSD, which sets up + // pointers within absoluteSD that point to other chunks of memory + // we pass into makeAbsoluteSD, and that happens outside the view of the GC. 
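Note: the security_windows.go hunk exports ACL.AceCount, adds the ACE_HEADER and ACCESS_ALLOWED_ACE layouts, and wires up advapi32's GetAce. A sketch of walking a file's DACL with them; the path is an arbitrary example, and the &ace.SidStart cast is the usual Win32 trick for reaching the SID that is stored inline after the fixed-size ACE fields.

```go
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// dumpDACL prints every ACE in the discretionary ACL of path, using the
// newly exported AceCount field and the new GetAce wrapper.
func dumpDACL(path string) error {
	sd, err := windows.GetNamedSecurityInfo(path, windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION)
	if err != nil {
		return err
	}
	dacl, _, err := sd.DACL()
	if err != nil {
		return err
	}
	if dacl == nil {
		fmt.Println("NULL DACL: object grants full access to everyone")
		return nil
	}
	for i := uint32(0); i < uint32(dacl.AceCount); i++ {
		var ace *windows.ACCESS_ALLOWED_ACE
		if err := windows.GetAce(dacl, i, &ace); err != nil {
			return err
		}
		// SidStart holds the first 32 bits of the inline SID, so its
		// address is the SID's address.
		sid := (*windows.SID)(unsafe.Pointer(&ace.SidStart))
		fmt.Printf("ace %d: type=%d flags=%#x mask=%#x sid=%s\n",
			i, ace.Header.AceType, ace.Header.AceFlags, ace.Mask, sid)
	}
	return nil
}

func main() {
	if err := dumpDACL(`C:\Windows\System32\notepad.exe`); err != nil {
		fmt.Println("error:", err)
	}
}
```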
+ // We therefore take some care here to then verify the pointers are as we expect + // and set them explicitly in view of the GC. See https://go.dev/issue/73199. + // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575. err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + if err != nil { + // Don't return absoluteSD, which might be partially initialized. + return nil, err + } + // Before using any fields, verify absoluteSD is in the format we expect according to Windows. + // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors + absControl, _, err := absoluteSD.Control() + if err != nil { + panic("absoluteSD: " + err.Error()) + } + if absControl&SE_SELF_RELATIVE != 0 { + panic("absoluteSD not in absolute format") + } + if absoluteSD.dacl != dacl { + panic("dacl pointer mismatch") + } + if absoluteSD.sacl != sacl { + panic("sacl pointer mismatch") + } + if absoluteSD.owner != owner { + panic("owner pointer mismatch") + } + if absoluteSD.group != group { + panic("group pointer mismatch") + } + absoluteSD.dacl = dacl + absoluteSD.sacl = sacl + absoluteSD.owner = owner + absoluteSD.group = group + return } diff --git a/src/nvcgo/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/nvcgo/vendor/golang.org/x/sys/windows/syscall_windows.go index 6525c62f3..bd5133730 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/nvcgo/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -17,8 +17,10 @@ import ( "unsafe" ) -type Handle uintptr -type HWND uintptr +type ( + Handle uintptr + HWND uintptr +) const ( InvalidHandle = ^Handle(0) @@ -166,6 +168,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) //sys DisconnectNamedPipe(pipe Handle) (err error) +//sys GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) +//sys GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -211,6 +215,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) //sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId +//sys LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW +//sys UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout +//sys GetKeyboardLayout(tid uint32) (hkl 
Handle) = user32.GetKeyboardLayout +//sys ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx //sys GetShellWindow() (shellWindow HWND) = user32.GetShellWindow //sys MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW //sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx @@ -307,8 +315,14 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents +//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW @@ -715,20 +729,12 @@ func DurationSinceBoot() time.Duration { } func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e - } - e = SetEndOfFile(fd) - if e != nil { - return e + type _FILE_END_OF_FILE_INFO struct { + EndOfFile int64 } - return nil + var info _FILE_END_OF_FILE_INFO + info.EndOfFile = length + return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) } func Gettimeofday(tv *Timeval) (err error) { @@ -866,6 +872,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo //sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname 
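Note: Ftruncate no longer bounces the handle through Seek and SetEndOfFile; it now hands a FILE_END_OF_FILE_INFO to SetFileInformationByHandle in a single call. A small usage sketch (the temp-file handling is incidental):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/windows"
)

func main() {
	f, err := os.CreateTemp("", "ftruncate-demo-*")
	if err != nil {
		fmt.Println("create:", err)
		return
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Resize to 4 KiB through the new single-call implementation.
	if err := windows.Ftruncate(windows.Handle(f.Fd()), 4096); err != nil {
		fmt.Println("ftruncate:", err)
		return
	}
	if fi, err := f.Stat(); err == nil {
		fmt.Println("size now:", fi.Size())
	}
}
```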
//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -884,6 +891,11 @@ const socket_error = uintptr(^uint32(0)) //sys GetACP() (acp uint32) = kernel32.GetACP //sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar //sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx +//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex +//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry +//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange +//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange +//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2 // For testing: clients can set this flag to force // creation of IPv6 sockets to return EAFNOSUPPORT. @@ -1368,9 +1380,11 @@ func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) } + func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) } + func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { return syscall.EWINDOWS } @@ -1673,19 +1687,23 @@ func (s NTStatus) Error() string { // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for // the more common *uint16 string type. func NewNTUnicodeString(s string) (*NTUnicodeString, error) { - var u NTUnicodeString - s16, err := UTF16PtrFromString(s) + s16, err := UTF16FromString(s) if err != nil { return nil, err } - RtlInitUnicodeString(&u, s16) - return &u, nil + n := uint16(len(s16) * 2) + return &NTUnicodeString{ + Length: n - 2, // subtract 2 bytes for the NULL terminator + MaximumLength: n, + Buffer: &s16[0], + }, nil } // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - slice := unsafe.Slice(s.Buffer, s.MaximumLength) - return slice[:s.Length] + // Note: this rounds the length down, if it happens + // to (incorrectly) be odd. Probably safer than rounding up. + return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2] } func (s *NTUnicodeString) String() string { diff --git a/src/nvcgo/vendor/golang.org/x/sys/windows/types_windows.go b/src/nvcgo/vendor/golang.org/x/sys/windows/types_windows.go index d8cb71db0..358be3c7f 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/nvcgo/vendor/golang.org/x/sys/windows/types_windows.go @@ -65,6 +65,22 @@ var signals = [...]string{ 15: "terminated", } +// File flags for [os.OpenFile]. The O_ prefix is used to indicate +// that these flags are specific to the OpenFile function. 
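Note: NewNTUnicodeString now builds the UNICODE_STRING directly from UTF16FromString instead of calling RtlInitUnicodeString, and Slice() halves the counts because, as the added comment on the type spells out, Length and MaximumLength are byte counts rather than uint16 counts. A tiny check of those semantics:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	u, err := windows.NewNTUnicodeString("example") // 7 UTF-16 code units plus a NUL
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// Length and MaximumLength are byte counts: 14 and 16 here.
	fmt.Println(u.Length, u.MaximumLength, u.String())
}
```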
+const ( + O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL + O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT + O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE + O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS + O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS + O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE + O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN + O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS + O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING + O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED + O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH +) + const ( FILE_READ_DATA = 0x00000001 FILE_READ_ATTRIBUTES = 0x00000080 @@ -176,6 +192,7 @@ const ( WAIT_FAILED = 0xFFFFFFFF // Access rights for process. + PROCESS_ALL_ACCESS = 0xFFFF PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 PROCESS_DUP_HANDLE = 0x0040 @@ -1060,6 +1077,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 @@ -1072,6 +1090,7 @@ const ( IP_ADD_MEMBERSHIP = 0xc IP_DROP_MEMBERSHIP = 0xd IP_PKTINFO = 0x13 + IP_MTU_DISCOVER = 0x47 IPV6_V6ONLY = 0x1b IPV6_UNICAST_HOPS = 0x4 @@ -1081,6 +1100,7 @@ const ( IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_PKTINFO = 0x13 + IPV6_MTU_DISCOVER = 0x47 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -1130,6 +1150,15 @@ const ( WSASYS_STATUS_LEN = 128 ) +// enum PMTUD_STATE from ws2ipdef.h +const ( + IP_PMTUDISC_NOT_SET = 0 + IP_PMTUDISC_DO = 1 + IP_PMTUDISC_DONT = 2 + IP_PMTUDISC_PROBE = 3 + IP_PMTUDISC_MAX = 4 +) + type WSABuf struct { Len uint32 Buf *byte @@ -1144,6 +1173,22 @@ type WSAMsg struct { Flags uint32 } +type WSACMSGHDR struct { + Len uintptr + Level int32 + Type int32 +} + +type IN_PKTINFO struct { + Addr [4]byte + Ifindex uint32 +} + +type IN6_PKTINFO struct { + Addr [16]byte + Ifindex uint32 +} + // Flags for WSASocket const ( WSA_FLAG_OVERLAPPED = 0x01 @@ -1947,6 +1992,12 @@ const ( SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 ) +// FILE_ZERO_DATA_INFORMATION from winioctl.h +type FileZeroDataInformation struct { + FileOffset int64 + BeyondFinalZero int64 +} + const ( ComputerNameNetBIOS = 0 ComputerNameDnsHostname = 1 @@ -2003,7 +2054,21 @@ const ( MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 ) -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 +// Flags for GetAdaptersAddresses, see +// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses. 
+const ( + GAA_FLAG_SKIP_UNICAST = 0x1 + GAA_FLAG_SKIP_ANYCAST = 0x2 + GAA_FLAG_SKIP_MULTICAST = 0x4 + GAA_FLAG_SKIP_DNS_SERVER = 0x8 + GAA_FLAG_INCLUDE_PREFIX = 0x10 + GAA_FLAG_SKIP_FRIENDLY_NAME = 0x20 + GAA_FLAG_INCLUDE_WINS_INFO = 0x40 + GAA_FLAG_INCLUDE_GATEWAYS = 0x80 + GAA_FLAG_INCLUDE_ALL_INTERFACES = 0x100 + GAA_FLAG_INCLUDE_ALL_COMPARTMENTS = 0x200 + GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400 +) const ( IF_TYPE_OTHER = 1 @@ -2017,6 +2082,50 @@ const ( IF_TYPE_IEEE1394 = 144 ) +// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin +const ( + IpPrefixOriginOther = 0 + IpPrefixOriginManual = 1 + IpPrefixOriginWellKnown = 2 + IpPrefixOriginDhcp = 3 + IpPrefixOriginRouterAdvertisement = 4 + IpPrefixOriginUnchanged = 1 << 4 +) + +// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin +const ( + NlsoOther = 0 + NlsoManual = 1 + NlsoWellKnown = 2 + NlsoDhcp = 3 + NlsoLinkLayerAddress = 4 + NlsoRandom = 5 + IpSuffixOriginOther = 0 + IpSuffixOriginManual = 1 + IpSuffixOriginWellKnown = 2 + IpSuffixOriginDhcp = 3 + IpSuffixOriginLinkLayerAddress = 4 + IpSuffixOriginRandom = 5 + IpSuffixOriginUnchanged = 1 << 4 +) + +// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see +// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state +const ( + NldsInvalid = 0 + NldsTentative = 1 + NldsDuplicate = 2 + NldsDeprecated = 3 + NldsPreferred = 4 + IpDadStateInvalid = 0 + IpDadStateTentative = 1 + IpDadStateDuplicate = 2 + IpDadStateDeprecated = 3 + IpDadStatePreferred = 4 +) + type SocketAddress struct { Sockaddr *syscall.RawSockaddrAny SockaddrLength int32 @@ -2144,6 +2253,132 @@ const ( IfOperStatusLowerLayerDown = 7 ) +const ( + IF_MAX_PHYS_ADDRESS_LENGTH = 32 + IF_MAX_STRING_SIZE = 256 +) + +// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex. +const ( + MibIfEntryNormal = 0 + MibIfEntryNormalWithoutStatistics = 2 +) + +// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type. +const ( + MibParameterNotification = 0 + MibAddInstance = 1 + MibDeleteInstance = 2 + MibInitialNotification = 3 +) + +// MibIfRow2 stores information about a particular interface. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2. 
+type MibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid GUID + Alias [IF_MAX_STRING_SIZE + 1]uint16 + Description [IF_MAX_STRING_SIZE + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint8 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid GUID + ConnectionType uint32 + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See +// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row. +type MibUnicastIpAddressRow struct { + Address RawSockaddrInet6 // SOCKADDR_INET union + InterfaceLuid uint64 + InterfaceIndex uint32 + PrefixOrigin uint32 + SuffixOrigin uint32 + ValidLifetime uint32 + PreferredLifetime uint32 + OnLinkPrefixLength uint8 + SkipAsSource uint8 + DadState uint32 + ScopeId uint32 + CreationTimeStamp Filetime +} + +const ScopeLevelCount = 16 + +// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface. +// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row. +type MibIpInterfaceRow struct { + Family uint16 + InterfaceLuid uint64 + InterfaceIndex uint32 + MaxReassemblySize uint32 + InterfaceIdentifier uint64 + MinRouterAdvertisementInterval uint32 + MaxRouterAdvertisementInterval uint32 + AdvertisingEnabled uint8 + ForwardingEnabled uint8 + WeakHostSend uint8 + WeakHostReceive uint8 + UseAutomaticMetric uint8 + UseNeighborUnreachabilityDetection uint8 + ManagedAddressConfigurationSupported uint8 + OtherStatefulConfigurationSupported uint8 + AdvertiseDefaultRoute uint8 + RouterDiscoveryBehavior uint32 + DadTransmits uint32 + BaseReachableTime uint32 + RetransmitTime uint32 + PathMtuDiscoveryTimeout uint32 + LinkLocalAddressBehavior uint32 + LinkLocalAddressTimeout uint32 + ZoneIndices [ScopeLevelCount]uint32 + SitePrefixLength uint32 + Metric uint32 + NlMtu uint32 + Connected uint8 + SupportsWakeUpPatterns uint8 + SupportsNeighborDiscovery uint8 + SupportsRouterDiscovery uint8 + ReachableTime uint32 + TransmitOffload uint32 + ReceiveOffload uint32 + DisableDefaultRoutes uint8 +} + // Console related constants used for the mode parameter to SetConsoleMode. See // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. @@ -2487,6 +2722,8 @@ type CommTimeouts struct { // NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. type NTUnicodeString struct { + // Note: Length and MaximumLength are in *bytes*, not uint16s. + // They should always be even. Length uint16 MaximumLength uint16 Buffer *uint16 @@ -3404,3 +3641,224 @@ type DCB struct { EvtChar byte wReserved1 uint16 } + +// Keyboard Layout Flags. 
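Note: the iphlpapi additions (GetIfEntry2Ex, GetUnicastIpAddressEntry, and the Notify*Change callbacks) come with the MibIfRow2, MibUnicastIpAddressRow and MibIpInterfaceRow layouts above. A sketch of a per-interface counter query; the interface index 1 is an arbitrary example and would normally come from net.Interfaces or GetAdaptersAddresses.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var row windows.MibIfRow2
	row.InterfaceIndex = 1 // placeholder index for illustration
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormal, &row); err != nil {
		fmt.Println("GetIfEntry2Ex:", err)
		return
	}
	fmt.Printf("%s: in=%d bytes out=%d bytes, oper status %d\n",
		windows.UTF16ToString(row.Alias[:]), row.InOctets, row.OutOctets, row.OperStatus)
}
```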
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw +const ( + KLF_ACTIVATE = 0x00000001 + KLF_SUBSTITUTE_OK = 0x00000002 + KLF_REORDER = 0x00000008 + KLF_REPLACELANG = 0x00000010 + KLF_NOTELLSHELL = 0x00000080 + KLF_SETFORPROCESS = 0x00000100 +) + +// Virtual Key codes +// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_IME_ON = 0x16 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_IME_OFF = 0x1A + VK_ESCAPE = 0x1B + VK_CONVERT = 0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 + VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + 
VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Mouse button constants. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001 + RIGHTMOST_BUTTON_PRESSED = 0x0002 + FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004 + FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008 + FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010 +) + +// Control key state constaints. +// https://docs.microsoft.com/en-us/windows/console/key-event-record-str +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 + LEFT_ALT_PRESSED = 0x0002 + LEFT_CTRL_PRESSED = 0x0008 + NUMLOCK_ON = 0x0020 + RIGHT_ALT_PRESSED = 0x0001 + RIGHT_CTRL_PRESSED = 0x0004 + SCROLLLOCK_ON = 0x0040 + SHIFT_PRESSED = 0x0010 +) + +// Mouse event record event flags. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + MOUSE_MOVED = 0x0001 + DOUBLE_CLICK = 0x0002 + MOUSE_WHEELED = 0x0004 + MOUSE_HWHEELED = 0x0008 +) + +// Input Record Event Types +// https://learn.microsoft.com/en-us/windows/console/input-record-str +const ( + FOCUS_EVENT = 0x0010 + KEY_EVENT = 0x0001 + MENU_EVENT = 0x0008 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 +) diff --git a/src/nvcgo/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/nvcgo/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b..426151a01 100644 --- a/src/nvcgo/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/nvcgo/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -180,10 +181,15 @@ var ( procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute") procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute") + procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2") procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex") + procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry") + procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange") + procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange") procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") @@ -232,6 +238,7 @@ var ( procFindResourceW = modkernel32.NewProc("FindResourceW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer") procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") procFormatMessageW = modkernel32.NewProc("FormatMessageW") @@ 
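Note: alongside the VK_* table and KLF_* flags above, the bump adds user32 wrappers for LoadKeyboardLayout, UnloadKeyboardLayout, GetKeyboardLayout and ToUnicodeEx. A sketch translating a virtual key with the current thread's layout; 0x41 is the 'A' key (letter keys reuse their ASCII codes and are not part of the VK_* list), and the zeroed key-state array means no modifiers are pressed.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	hkl := windows.GetKeyboardLayout(0) // 0 selects the current thread's layout

	var keyState [256]byte // all zero: no modifier keys held
	var buf [4]uint16
	n := windows.ToUnicodeEx(0x41 /* 'A' key */, 0, &keyState[0], &buf[0], int32(len(buf)), 0, hkl)
	if n > 0 {
		fmt.Printf("VK 0x41 -> %q\n", windows.UTF16ToString(buf[:n]))
	} else {
		fmt.Println("no translation (dead key or error):", n)
	}
}
```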
-246,7 +253,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -272,8 +281,11 @@ var ( procGetMaximumProcessorCount = modkernel32.NewProc("GetMaximumProcessorCount") procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetNamedPipeClientProcessId = modkernel32.NewProc("GetNamedPipeClientProcessId") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId") + procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents") procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") procGetProcAddress = modkernel32.NewProc("GetProcAddress") @@ -346,8 +358,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -477,12 +491,16 @@ var ( procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow") procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow") procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo") + procGetKeyboardLayout = moduser32.NewProc("GetKeyboardLayout") procGetShellWindow = moduser32.NewProc("GetShellWindow") procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId") procIsWindow = moduser32.NewProc("IsWindow") procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode") procIsWindowVisible = moduser32.NewProc("IsWindowVisible") + procLoadKeyboardLayoutW = moduser32.NewProc("LoadKeyboardLayoutW") procMessageBoxW = moduser32.NewProc("MessageBoxW") + procToUnicodeEx = moduser32.NewProc("ToUnicodeEx") + procUnloadKeyboardLayout = moduser32.NewProc("UnloadKeyboardLayout") procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") @@ -495,6 +513,7 @@ var ( procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW") procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = 
modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -529,25 +548,25 @@ var ( ) func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0) + r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags)) ret = CONFIGRET(r0) return } func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) { - r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0) + r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error)) ret = Errno(r0) return } @@ -557,7 +576,7 @@ func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, if resetToDefault { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -569,7 +588,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok if disableAllPrivileges { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) + r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen))) if r1 == 0 { err = errnoErr(e1) } @@ -577,7 +596,7 @@ func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tok } func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, 
subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -585,7 +604,7 @@ func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, s } func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -593,7 +612,7 @@ func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries } func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -601,7 +620,7 @@ func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err err } func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), 
uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName))) if r1 == 0 { err = errnoErr(e1) } @@ -609,7 +628,7 @@ func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, e } func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { - r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) if r1 == 0 { err = errnoErr(e1) } @@ -617,7 +636,7 @@ func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) ( } func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -625,7 +644,7 @@ func CloseServiceHandle(handle Handle) (err error) { } func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -633,7 +652,7 @@ func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err } func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen))) if r1 == 0 { err = errnoErr(e1) } @@ -641,7 +660,7 @@ func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR } func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid))) if r1 == 0 { err = errnoErr(e1) } @@ -658,7 +677,7 @@ func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision ui } func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), 
uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -666,7 +685,7 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision } func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { - r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid))) if r1 == 0 { err = errnoErr(e1) } @@ -674,7 +693,7 @@ func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { } func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) if r1 == 0 { err = errnoErr(e1) } @@ -686,7 +705,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -694,7 +713,7 @@ func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, proc } func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -703,7 +722,7 @@ func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access } func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid 
*SID, sizeSid *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid))) if r1 == 0 { err = errnoErr(e1) } @@ -711,7 +730,7 @@ func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, s } func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -719,7 +738,7 @@ func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16 } func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) if r1 == 0 { err = errnoErr(e1) } @@ -727,7 +746,7 @@ func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { } func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -735,7 +754,7 @@ func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { } func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service)) if r1 == 0 { err = errnoErr(e1) } @@ -743,7 +762,7 @@ func DeleteService(service Handle) (err error) { } func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -751,7 +770,7 @@ func DeregisterEventSource(handle Handle) (err error) { } func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) { - r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) + r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken))) if r1 == 0 { err = errnoErr(e1) } @@ -759,7 +778,7 @@ func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes } 
func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) + r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned))) if r1 == 0 { err = errnoErr(e1) } @@ -767,7 +786,7 @@ func EnumDependentServices(service Handle, activityState uint32, services *ENUM_ } func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName))) if r1 == 0 { err = errnoErr(e1) } @@ -775,21 +794,29 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv } func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { - r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2))) isEqual = r0 != 0 return } func FreeSid(sid *SID) (err error) { - r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid))) if r1 != 0 { err = errnoErr(e1) } return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) { + r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetLengthSid(sid *SID) (len uint32) { - r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid))) len = uint32(r0) return } @@ -804,7 +831,7 @@ func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), 
uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -812,7 +839,7 @@ func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) if r1 == 0 { err = errnoErr(e1) } @@ -828,7 +855,7 @@ func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl if *daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1))) *daclPresent = _p0 != 0 *daclDefaulted = _p1 != 0 if r1 == 0 { @@ -842,7 +869,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau if *groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) *groupDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -851,7 +878,7 @@ func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefau } func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd))) len = uint32(r0) return } @@ -861,7 +888,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau if *ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) *ownerDefaulted = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -870,7 +897,7 @@ func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefau } func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) if r0 != 0 { ret = 
syscall.Errno(r0) } @@ -886,7 +913,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl if *saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1))) *saclPresent = _p0 != 0 *saclDefaulted = _p1 != 0 if r1 == 0 { @@ -896,7 +923,7 @@ func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl } func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { - r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -904,25 +931,25 @@ func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) { - r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid))) authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0)) return } func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index)) subAuthority = (*uint32)(unsafe.Pointer(r0)) return } func getSidSubAuthorityCount(sid *SID) (count *uint8) { - r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid))) count = (*uint8)(unsafe.Pointer(r0)) return } func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -930,7 +957,7 @@ func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func ImpersonateSelf(impersonationlevel uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0) + r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel)) if r1 == 0 { err = errnoErr(e1) } @@ -938,7 
+965,7 @@ func ImpersonateSelf(impersonationlevel uint32) (err error) { } func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { - r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision)) if r1 == 0 { err = errnoErr(e1) } @@ -954,7 +981,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint if rebootAfterShutdown { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) if r1 == 0 { err = errnoErr(e1) } @@ -962,7 +989,7 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint } func isTokenRestricted(tokenHandle Token) (ret bool, err error) { - r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle)) ret = r0 != 0 if !ret { err = errnoErr(e1) @@ -971,25 +998,25 @@ func isTokenRestricted(tokenHandle Token) (ret bool, err error) { } func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd))) isValid = r0 != 0 return } func isValidSid(sid *SID) (isValid bool) { - r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid))) isValid = r0 != 0 return } func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) { - r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0) + r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType)) isWellKnown = r0 != 0 return } func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -997,7 +1024,7 @@ func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen } func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { - r1, _, e1 := 
syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use))) if r1 == 0 { err = errnoErr(e1) } @@ -1005,7 +1032,7 @@ func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint3 } func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) if r1 == 0 { err = errnoErr(e1) } @@ -1013,7 +1040,7 @@ func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err err } func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1021,7 +1048,7 @@ func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DE } func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) if r1 == 0 { err = errnoErr(e1) } @@ -1029,7 +1056,7 @@ func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURIT } func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) + r0, _, _ := 
syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1037,7 +1064,7 @@ func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERV } func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) + r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1045,7 +1072,7 @@ func OpenProcessToken(process Handle, access uint32, token *Token) (err error) { } func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1054,7 +1081,7 @@ func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (ha } func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1067,7 +1094,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token if openAsSelf { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } @@ -1075,7 +1102,7 @@ func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token } func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1083,7 +1110,7 @@ func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize } func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1095,7 +1122,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel 
uint32, dynamicInf if err != nil { return } - r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) + r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo)) if r1 == 0 { err = errnoErr(e1) } @@ -1103,7 +1130,7 @@ func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInf } func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1111,7 +1138,7 @@ func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, b } func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1119,7 +1146,7 @@ func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { } func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded))) if r1 == 0 { err = errnoErr(e1) } @@ -1127,7 +1154,7 @@ func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize } func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1135,7 +1162,7 @@ func RegCloseKey(key Handle) (regerrno error) { } func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1151,7 +1178,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, if asynchronous { _p1 = 1 } - r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), 
uintptr(event), uintptr(_p1), 0) + r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1)) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1159,7 +1186,7 @@ func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, } func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1167,7 +1194,7 @@ func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint } func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1175,7 +1202,7 @@ func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint } func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) if r0 != 0 { regerrno = syscall.Errno(r0) } @@ -1183,7 +1210,7 @@ func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32 } func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1192,7 
+1219,7 @@ func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Hand } func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) + r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1201,7 +1228,7 @@ func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, cont } func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) if r1 == 0 { err = errnoErr(e1) } @@ -1209,7 +1236,7 @@ func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrS } func RevertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr()) if r1 == 0 { err = errnoErr(e1) } @@ -1217,7 +1244,7 @@ func RevertToSelf() (err error) { } func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { - r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1225,7 +1252,7 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE } func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { - r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) + r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { err = errnoErr(e1) } @@ -1242,7 +1269,7 @@ func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, security } func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { - r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := 
syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1250,7 +1277,7 @@ func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securi } func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) if r1 == 0 { err = errnoErr(e1) } @@ -1266,7 +1293,7 @@ func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl * if daclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1278,7 +1305,7 @@ func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaul if groupDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1290,7 +1317,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul if ownerDefaulted { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -1298,7 +1325,7 @@ func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaul } func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { - syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl))) return } @@ -1311,7 +1338,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * if saclDefaulted { _p1 = 1 } - r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1)) if r1 == 0 { err = errnoErr(e1) } @@ -1319,7 +1346,7 @@ func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl * } func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) 
(ret error) { - r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl))) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1327,7 +1354,7 @@ func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformati } func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus))) if r1 == 0 { err = errnoErr(e1) } @@ -1335,7 +1362,7 @@ func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) } func SetThreadToken(thread *Handle, token Token) (err error) { - r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0) + r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token)) if r1 == 0 { err = errnoErr(e1) } @@ -1343,7 +1370,7 @@ func SetThreadToken(thread *Handle, token Token) (err error) { } func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen)) if r1 == 0 { err = errnoErr(e1) } @@ -1351,7 +1378,7 @@ func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint } func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable))) if r1 == 0 { err = errnoErr(e1) } @@ -1359,7 +1386,7 @@ func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { } func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) if r1 == 0 { err = errnoErr(e1) } @@ -1367,7 +1394,7 @@ func StartService(service Handle, numArgs uint32, argVectors **uint16) (err erro } func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), 
uintptr(unsafe.Pointer(storeContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1375,7 +1402,7 @@ func CertAddCertificateContextToStore(store Handle, certContext *CertContext, ad } func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -1383,7 +1410,7 @@ func CertCloseStore(store Handle, flags uint32) (err error) { } func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1392,7 +1419,7 @@ func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, en } func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext))) if r1 == 0 { err = errnoErr(e1) } @@ -1400,13 +1427,13 @@ func CertDeleteCertificateFromStore(certContext *CertContext) (err error) { } func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) { - r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0) + r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext))) dupContext = (*CertContext)(unsafe.Pointer(r0)) return } func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext))) context = (*CertContext)(unsafe.Pointer(r0)) if context == nil { err = errnoErr(e1) @@ -1415,7 +1442,7 @@ func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (contex } func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) + r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext))) cert = (*CertContext)(unsafe.Pointer(r0)) if cert == nil { err = errnoErr(e1) @@ -1424,7 +1451,7 @@ func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags } func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain 
*CertChainContext, err error) { - r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) + r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext))) certchain = (*CertChainContext)(unsafe.Pointer(r0)) if certchain == nil { err = errnoErr(e1) @@ -1433,18 +1460,18 @@ func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint3 } func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) { - r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) + r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions))) ret = (*CertExtension)(unsafe.Pointer(r0)) return } func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx))) return } func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx))) if r1 == 0 { err = errnoErr(e1) } @@ -1452,7 +1479,7 @@ func CertFreeCertificateContext(ctx *CertContext) (err error) { } func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx))) if r1 == 0 { err = errnoErr(e1) } @@ -1460,13 +1487,13 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a } func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) { - r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) + r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size)) chars = uint32(r0) return } func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), 
uintptr(para), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1475,7 +1502,7 @@ func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptPr } func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name))) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1484,7 +1511,7 @@ func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { } func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status))) if r1 == 0 { err = errnoErr(e1) } @@ -1496,7 +1523,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete if *callerFreeProvOrNCryptKey { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) + r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0))) *callerFreeProvOrNCryptKey = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -1505,7 +1532,7 @@ func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, paramete } func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen))) if r1 == 0 { err = errnoErr(e1) } @@ -1513,7 +1540,7 @@ func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte } func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, 
_, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1521,7 +1548,7 @@ func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, } func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) { - r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0) + r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context))) if r1 == 0 { err = errnoErr(e1) } @@ -1529,7 +1556,7 @@ func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentT } func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) { - r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut))) if r1 == 0 { err = errnoErr(e1) } @@ -1537,7 +1564,7 @@ func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBl } func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags)) store = Handle(r0) if store == 0 { err = errnoErr(e1) @@ -1546,7 +1573,7 @@ func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (sto } func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2))) same = r0 != 0 return } @@ -1561,7 +1588,7 @@ func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSR } func 
_DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) if r0 != 0 { status = syscall.Errno(r0) } @@ -1569,12 +1596,12 @@ func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DN } func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype)) return } func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } @@ -1582,15 +1609,23 @@ func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si } func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) { - r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0) + r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size)) if r0 != 0 { ret = syscall.Errno(r0) } return } +func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) { + r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle)) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { - r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1598,7 +1633,7 @@ func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapter } func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1606,7 +1641,7 @@ func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { } func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) { - r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0) + r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), 
uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1614,7 +1649,47 @@ func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcod } func GetIfEntry(pIfRow *MibIfRow) (errcode error) { - r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) { + r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) { + var _p0 uint32 + if initialNotification { + _p0 = 1 + } + r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle))) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -1622,7 +1697,7 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { } func AddDllDirectory(path *uint16) (cookie uintptr, err error) { - r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path))) cookie = uintptr(r0) if cookie == 0 { err = errnoErr(e1) @@ -1631,7 +1706,7 @@ func AddDllDirectory(path *uint16) (cookie uintptr, err error) { } func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) + r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process)) if r1 == 0 { err = errnoErr(e1) } @@ -1639,7 +1714,7 @@ func AssignProcessToJobObject(job Handle, process Handle) (err error) { } func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s)) if r1 == 0 { err = errnoErr(e1) } @@ -1647,7 +1722,7 @@ func CancelIo(s Handle) (err error) { } func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o))) if r1 == 0 { err = errnoErr(e1) } @@ -1655,7 +1730,7 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { } func ClearCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, 
uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1663,7 +1738,7 @@ func ClearCommBreak(handle Handle) (err error) { } func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { - r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) if r1 == 0 { err = errnoErr(e1) } @@ -1671,7 +1746,7 @@ func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error } func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1679,12 +1754,12 @@ func CloseHandle(handle Handle) (err error) { } func ClosePseudoConsole(console Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console)) return } func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1692,7 +1767,7 @@ func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { } func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa))) if r1 == 0 { err = errnoErr(e1) } @@ -1700,7 +1775,7 @@ func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { } func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1709,7 +1784,7 @@ func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, d } func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1718,7 +1793,7 @@ func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialStat } func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow 
uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1727,7 +1802,7 @@ func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxS } func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1736,7 +1811,7 @@ func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes } func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1744,7 +1819,7 @@ func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr } func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1753,7 +1828,7 @@ func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, thr } func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) + r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -1762,7 +1837,7 @@ func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, } func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess)) handle = Handle(r0) if handle == 
0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1775,7 +1850,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 if initialOwner { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 || e1 == ERROR_ALREADY_EXISTS { err = errnoErr(e1) @@ -1784,7 +1859,7 @@ func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16 } func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1793,7 +1868,7 @@ func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances u } func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size)) if r1 == 0 { err = errnoErr(e1) } @@ -1805,7 +1880,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA if inheritHandles { _p0 = 1 } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo))) if r1 == 0 { err = errnoErr(e1) } @@ -1813,7 +1888,7 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA } func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole))) if r0 != 0 { hr = syscall.Errno(r0) } @@ -1821,7 +1896,7 
@@ func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pcons } func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { err = errnoErr(e1) } @@ -1829,7 +1904,7 @@ func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags u } func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1838,7 +1913,7 @@ func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, er } func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { err = errnoErr(e1) } @@ -1846,7 +1921,7 @@ func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err } func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -1854,12 +1929,12 @@ func DeleteFile(path *uint16) (err error) { } func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) { - syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0) + syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist))) return } func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint))) if r1 == 0 { err = errnoErr(e1) } @@ -1867,7 +1942,7 @@ func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { } func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -1875,7 +1950,7 @@ 
func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff } func DisconnectNamedPipe(pipe Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe)) if r1 == 0 { err = errnoErr(e1) } @@ -1887,7 +1962,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP if bInheritHandle { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions)) if r1 == 0 { err = errnoErr(e1) } @@ -1895,7 +1970,7 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP } func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { - r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc)) if r1 == 0 { err = errnoErr(e1) } @@ -1903,12 +1978,12 @@ func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { } func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode)) return } func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -1917,7 +1992,7 @@ func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, } func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1925,7 +2000,7 @@ func FindClose(handle Handle) (err error) { } func FindCloseChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1946,7 +2021,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter if watchSubtree { _p1 = 1 } - r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) + r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1955,7 +2030,7 @@ func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter } func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, 
uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data))) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1964,7 +2039,7 @@ func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err erro } func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1973,7 +2048,7 @@ func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, b } func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -1982,7 +2057,7 @@ func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, er } func FindNextChangeNotification(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -1990,7 +2065,7 @@ func FindNextChangeNotification(handle Handle) (err error) { } func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -1998,7 +2073,7 @@ func findNextFile1(handle Handle, data *win32finddata1) (err error) { } func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2006,7 +2081,7 @@ func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uin } func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2014,7 +2089,7 @@ func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) } func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) { - r0, _, e1 := 
syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType)) + r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType)) resInfo = Handle(r0) if resInfo == 0 { err = errnoErr(e1) @@ -2023,7 +2098,7 @@ func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, } func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume)) if r1 == 0 { err = errnoErr(e1) } @@ -2031,7 +2106,15 @@ func FindVolumeClose(findVolume Handle) (err error) { } func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func FlushConsoleInputBuffer(console Handle) (err error) { + r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console)) if r1 == 0 { err = errnoErr(e1) } @@ -2039,7 +2122,7 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { } func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2047,7 +2130,7 @@ func FlushFileBuffers(handle Handle) (err error) { } func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length)) if r1 == 0 { err = errnoErr(e1) } @@ -2059,7 +2142,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2068,7 +2151,7 @@ func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, bu } func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs))) if r1 == 0 { err = errnoErr(e1) } @@ -2076,7 +2159,7 @@ func FreeEnvironmentStrings(envs *uint16) (err error) { } func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -2084,7 +2167,7 @@ func FreeLibrary(handle Handle) (err error) { } func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) + r1, _, e1 := 
syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID)) if r1 == 0 { err = errnoErr(e1) } @@ -2092,19 +2175,19 @@ func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err erro } func GetACP() (acp uint32) { - r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetACP.Addr()) acp = uint32(r0) return } func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat))) if r1 == 0 { err = errnoErr(e1) } @@ -2112,7 +2195,7 @@ func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { } func GetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -2120,7 +2203,7 @@ func GetCommState(handle Handle, lpDCB *DCB) (err error) { } func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) if r1 == 0 { err = errnoErr(e1) } @@ -2128,13 +2211,13 @@ func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr()) cmd = (*uint16)(unsafe.Pointer(r0)) return } func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } @@ -2142,23 +2225,41 @@ func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { } func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) if r1 == 0 { err = errnoErr(e1) } return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr()) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode))) if r1 == 0 { err = errnoErr(e1) } return } +func GetConsoleOutputCP() (cp 
uint32, err error) { + r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr()) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2166,7 +2267,7 @@ func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) ( } func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2175,19 +2276,19 @@ func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { } func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr()) pid = uint32(r0) return } func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr()) id = uint32(r0) return } func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes))) if r1 == 0 { err = errnoErr(e1) } @@ -2195,13 +2296,13 @@ func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint6 } func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName))) driveType = uint32(r0) return } func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr()) envs = (*uint16)(unsafe.Pointer(r0)) if envs == nil { err = errnoErr(e1) @@ -2210,7 +2311,7 @@ func GetEnvironmentStrings() (envs *uint16, err error) { } func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2219,7 +2320,7 @@ func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32 } func GetExitCodeProcess(handle Handle, exitcode *uint32) (err 
error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode))) if r1 == 0 { err = errnoErr(e1) } @@ -2227,7 +2328,7 @@ func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { } func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) if r1 == 0 { err = errnoErr(e1) } @@ -2235,7 +2336,7 @@ func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { } func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name))) attrs = uint32(r0) if attrs == INVALID_FILE_ATTRIBUTES { err = errnoErr(e1) @@ -2244,7 +2345,7 @@ func GetFileAttributes(name *uint16) (attrs uint32, err error) { } func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data))) if r1 == 0 { err = errnoErr(e1) } @@ -2252,7 +2353,7 @@ func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (e } func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -2260,7 +2361,7 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, } func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -2268,7 +2369,7 @@ func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2277,7 +2378,7 @@ func GetFileType(filehandle Handle) (n uint32, err error) { } func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), 
uintptr(filePathSize), uintptr(flags), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2286,7 +2387,7 @@ func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32 } func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2295,13 +2396,13 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( } func GetLargePageMinimum() (size uintptr) { - r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr()) size = uintptr(r0) return } func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetLastError.Addr()) if r0 != 0 { lasterr = syscall.Errno(r0) } @@ -2309,7 +2410,7 @@ func GetLastError() (lasterr error) { } func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2318,7 +2419,7 @@ func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err } func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr()) drivesBitMask = uint32(r0) if drivesBitMask == 0 { err = errnoErr(e1) @@ -2327,7 +2428,7 @@ func GetLogicalDrives() (drivesBitMask uint32, err error) { } func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2336,13 +2437,13 @@ func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err er } func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) { - r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber)) ret = uint32(r0) return } func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2351,7 +2452,15 @@ func GetModuleFileName(module Handle, filename *uint16, 
size uint32) (n uint32, } func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID))) if r1 == 0 { err = errnoErr(e1) } @@ -2359,7 +2468,7 @@ func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err er } func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2367,7 +2476,23 @@ func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, m } func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents))) if r1 == 0 { err = errnoErr(e1) } @@ -2379,7 +2504,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa if wait { _p0 = 1 } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -2387,7 +2512,7 @@ func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wa } func GetPriorityClass(process Handle) (ret uint32, err error) 
{ - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process)) ret = uint32(r0) if ret == 0 { err = errnoErr(e1) @@ -2405,7 +2530,7 @@ func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { } func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname))) proc = uintptr(r0) if proc == 0 { err = errnoErr(e1) @@ -2414,7 +2539,7 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { } func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process)) id = uint32(r0) if id == 0 { err = errnoErr(e1) @@ -2423,7 +2548,7 @@ func GetProcessId(process Handle) (id uint32, err error) { } func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2431,7 +2556,7 @@ func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uin } func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags))) if r1 == 0 { err = errnoErr(e1) } @@ -2439,7 +2564,7 @@ func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { } func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime))) if r1 == 0 { err = errnoErr(e1) } @@ -2447,12 +2572,12 @@ func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, } func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) { - syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0) + syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), 
uintptr(unsafe.Pointer(flags))) return } func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout)) if r1 == 0 { err = errnoErr(e1) } @@ -2460,7 +2585,7 @@ func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overl } func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2469,12 +2594,12 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin } func getStartupInfo(startupInfo *StartupInfo) { - syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo))) return } func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -2483,7 +2608,7 @@ func GetStdHandle(stdhandle uint32) (handle Handle, err error) { } func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2492,7 +2617,7 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2500,17 +2625,17 @@ func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time))) return } func getSystemWindowsDirectory(dir *uint16, dirLen 
uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2519,7 +2644,7 @@ func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err erro } func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf))) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2528,7 +2653,7 @@ func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { } func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2536,13 +2661,13 @@ func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint } func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) + r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr()) ms = uint64(r0) return } func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi))) rc = uint32(r0) if rc == 0xffffffff { err = errnoErr(e1) @@ -2551,7 +2676,7 @@ func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { } func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize))) if r1 == 0 { err = errnoErr(e1) } @@ -2559,7 +2684,7 @@ func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16 } func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + r0, _, e1 := syscall.SyscallN(procGetVersion.Addr()) ver = uint32(r0) if ver == 0 { err = errnoErr(e1) @@ -2568,7 +2693,7 @@ func GetVersion() (ver uint32, err error) { } func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), 
uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2576,7 +2701,7 @@ func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeN } func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize)) if r1 == 0 { err = errnoErr(e1) } @@ -2584,7 +2709,7 @@ func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volume } func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) if r1 == 0 { err = errnoErr(e1) } @@ -2592,7 +2717,7 @@ func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint } func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) if r1 == 0 { err = errnoErr(e1) } @@ -2600,7 +2725,7 @@ func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength ui } func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), 
uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength))) if r1 == 0 { err = errnoErr(e1) } @@ -2608,7 +2733,7 @@ func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16 } func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen)) len = uint32(r0) if len == 0 { err = errnoErr(e1) @@ -2617,7 +2742,7 @@ func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { } func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2629,7 +2754,7 @@ func IsWow64Process(handle Handle, isWow64 *bool) (err error) { if *isWow64 { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0))) *isWow64 = _p0 != 0 if r1 == 0 { err = errnoErr(e1) @@ -2642,7 +2767,7 @@ func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint1 if err != nil { return } - r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) + r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine))) if r1 == 0 { err = errnoErr(e1) } @@ -2659,7 +2784,7 @@ func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, e } func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2677,7 +2802,7 @@ func LoadLibrary(libname string) (handle Handle, err error) { } func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2686,7 +2811,7 @@ func _LoadLibrary(libname *uint16) (handle Handle, err error) { } func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo)) resData = Handle(r0) if resData == 0 { err = errnoErr(e1) @@ -2695,7 +2820,7 @@ func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) { } func LocalAlloc(flags uint32, 
length uint32) (ptr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length)) ptr = uintptr(r0) if ptr == 0 { err = errnoErr(e1) @@ -2704,7 +2829,7 @@ func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) { } func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem)) handle = Handle(r0) if handle != 0 { err = errnoErr(e1) @@ -2713,7 +2838,7 @@ func LocalFree(hmem Handle) (handle Handle, err error) { } func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) + r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2721,7 +2846,7 @@ func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, byt } func LockResource(resData Handle) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0) + r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2730,7 +2855,7 @@ func LockResource(resData Handle) (addr uintptr, err error) { } func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length)) addr = uintptr(r0) if addr == 0 { err = errnoErr(e1) @@ -2739,7 +2864,7 @@ func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow ui } func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2747,7 +2872,7 @@ func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0) + r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2755,7 +2880,7 @@ func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) { } func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) if r1 == 0 { err = 
errnoErr(e1) } @@ -2763,7 +2888,7 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { } func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to))) if r1 == 0 { err = errnoErr(e1) } @@ -2771,7 +2896,7 @@ func MoveFile(from *uint16, to *uint16) (err error) { } func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { - r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) nwrite = int32(r0) if nwrite == 0 { err = errnoErr(e1) @@ -2784,7 +2909,7 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2797,7 +2922,7 @@ func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle H if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2810,7 +2935,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) + r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2823,7 +2948,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand if inheritHandle { _p0 = 1 } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) + r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) handle = Handle(r0) if handle == 0 { err = errnoErr(e1) @@ -2832,7 +2957,7 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand } func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2840,7 +2965,7 @@ func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overla } func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { 
- r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2848,7 +2973,7 @@ func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -2856,7 +2981,7 @@ func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { } func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0) + r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid))) if r1 == 0 { err = errnoErr(e1) } @@ -2864,7 +2989,7 @@ func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) { } func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2872,7 +2997,7 @@ func PulseEvent(event Handle) (err error) { } func PurgeComm(handle Handle, dwFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -2880,7 +3005,7 @@ func PurgeComm(handle Handle, dwFlags uint32) (err error) { } func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) if n == 0 { err = errnoErr(e1) @@ -2889,7 +3014,7 @@ func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint3 } func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0) + r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size))) if r1 == 0 { err = errnoErr(e1) } @@ -2897,7 +3022,7 @@ func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size } func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0) + r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), 
uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen))) if r1 == 0 { err = errnoErr(e1) } @@ -2905,7 +3030,7 @@ func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobO } func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl))) if r1 == 0 { err = errnoErr(e1) } @@ -2917,7 +3042,7 @@ func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree if watchSubTree { _p0 = 1 } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) if r1 == 0 { err = errnoErr(e1) } @@ -2929,7 +3054,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -2937,7 +3062,7 @@ func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) ( } func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0) + r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead))) if r1 == 0 { err = errnoErr(e1) } @@ -2945,7 +3070,7 @@ func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size u } func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex)) if r1 == 0 { err = errnoErr(e1) } @@ -2953,7 +3078,7 @@ func ReleaseMutex(mutex Handle) (err error) { } func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -2961,7 +3086,7 @@ func RemoveDirectory(path *uint16) (err error) { } func RemoveDllDirectory(cookie uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, 
uintptr(cookie), 0, 0) + r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie)) if r1 == 0 { err = errnoErr(e1) } @@ -2969,7 +3094,7 @@ func RemoveDllDirectory(cookie uintptr) (err error) { } func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -2977,7 +3102,7 @@ func ResetEvent(event Handle) (err error) { } func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size)) if r0 != 0 { hr = syscall.Errno(r0) } @@ -2985,7 +3110,7 @@ func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { } func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) + r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread)) ret = uint32(r0) if ret == 0xffffffff { err = errnoErr(e1) @@ -2994,7 +3119,7 @@ func ResumeThread(thread Handle) (ret uint32, err error) { } func SetCommBreak(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3002,7 +3127,7 @@ func SetCommBreak(handle Handle) (err error) { } func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask)) if r1 == 0 { err = errnoErr(e1) } @@ -3010,7 +3135,7 @@ func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { } func SetCommState(handle Handle, lpDCB *DCB) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB))) if r1 == 0 { err = errnoErr(e1) } @@ -3018,7 +3143,15 @@ func SetCommState(handle Handle, lpDCB *DCB) (err error) { } func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { - r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) + r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3026,7 +3159,7 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { } func setConsoleCursorPosition(console Handle, position uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) + r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position)) if r1 == 0 { err = errnoErr(e1) } @@ -3034,7 +3167,15 @@ func setConsoleCursorPosition(console Handle, position uint32) (err error) { } func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + r1, _, e1 := 
syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp)) if r1 == 0 { err = errnoErr(e1) } @@ -3042,7 +3183,7 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { } func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3050,7 +3191,7 @@ func SetCurrentDirectory(path *uint16) (err error) { } func SetDefaultDllDirectories(directoryFlags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags)) if r1 == 0 { err = errnoErr(e1) } @@ -3067,7 +3208,7 @@ func SetDllDirectory(path string) (err error) { } func _SetDllDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path))) if r1 == 0 { err = errnoErr(e1) } @@ -3075,7 +3216,7 @@ func _SetDllDirectory(path *uint16) (err error) { } func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3083,7 +3224,7 @@ func SetEndOfFile(handle Handle) (err error) { } func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value))) if r1 == 0 { err = errnoErr(e1) } @@ -3091,13 +3232,13 @@ func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { } func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) + r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode)) ret = uint32(r0) return } func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event)) if r1 == 0 { err = errnoErr(e1) } @@ -3105,7 +3246,7 @@ func SetEvent(event Handle) (err error) { } func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs)) if r1 == 0 { err = errnoErr(e1) } @@ -3113,7 +3254,7 @@ func SetFileAttributes(name *uint16, attrs uint32) (err error) { } func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3121,7 +3262,7 @@ func 
SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) } func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen)) if r1 == 0 { err = errnoErr(e1) } @@ -3129,7 +3270,7 @@ func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inB } func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence)) newlowoffset = uint32(r0) if newlowoffset == 0xffffffff { err = errnoErr(e1) @@ -3138,7 +3279,7 @@ func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence } func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime))) if r1 == 0 { err = errnoErr(e1) } @@ -3146,7 +3287,7 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim } func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength)) if r1 == 0 { err = errnoErr(e1) } @@ -3154,7 +3295,7 @@ func SetFileValidData(handle Handle, validDataLength int64) (err error) { } func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3162,7 +3303,7 @@ func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) } func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) + r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength)) ret = int(r0) if ret == 0 { err = errnoErr(e1) @@ -3171,7 +3312,7 @@ func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobOb } func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) { - 
r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout))) if r1 == 0 { err = errnoErr(e1) } @@ -3179,7 +3320,7 @@ func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uin } func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) + r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass)) if r1 == 0 { err = errnoErr(e1) } @@ -3191,7 +3332,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { if disable { _p0 = 1 } - r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0)) if r1 == 0 { err = errnoErr(e1) } @@ -3199,7 +3340,7 @@ func SetProcessPriorityBoost(process Handle, disable bool) (err error) { } func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3207,7 +3348,7 @@ func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { } func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0) + r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags)) if r1 == 0 { err = errnoErr(e1) } @@ -3215,7 +3356,7 @@ func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr } func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle)) if r1 == 0 { err = errnoErr(e1) } @@ -3223,7 +3364,7 @@ func SetStdHandle(stdhandle uint32, handle Handle) (err error) { } func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3231,7 +3372,7 @@ func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { } func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + r1, _, e1 := 
syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName))) if r1 == 0 { err = errnoErr(e1) } @@ -3239,7 +3380,7 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro } func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) if r1 == 0 { err = errnoErr(e1) } @@ -3247,7 +3388,7 @@ func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { } func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { - r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) + r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo)) size = uint32(r0) if size == 0 { err = errnoErr(e1) @@ -3260,13 +3401,13 @@ func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { if alertable { _p0 = 1 } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) + r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0)) ret = uint32(r0) return } func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode)) if r1 == 0 { err = errnoErr(e1) } @@ -3274,7 +3415,7 @@ func TerminateJobObject(job Handle, exitCode uint32) (err error) { } func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode)) if r1 == 0 { err = errnoErr(e1) } @@ -3282,7 +3423,7 @@ func TerminateProcess(handle Handle, exitcode uint32) (err error) { } func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3290,7 +3431,7 @@ func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) + r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry))) if r1 == 0 { err = errnoErr(e1) } @@ -3298,7 +3439,7 @@ func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { } func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) if r1 == 0 { err = errnoErr(e1) } @@ -3306,7 +3447,7 @@ 
func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint3
 }
 
 func UnmapViewOfFile(addr uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3314,7 +3455,7 @@ func UnmapViewOfFile(addr uintptr) (err error) {
 }
 
 func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3322,7 +3463,7 @@ func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32,
 }
 
 func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
-	r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect))
 	value = uintptr(r0)
 	if value == 0 {
 		err = errnoErr(e1)
@@ -3331,7 +3472,7 @@ func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint3
 }
 
 func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype))
+	r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3339,7 +3480,7 @@ func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
 }
 
 func VirtualLock(addr uintptr, length uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+	r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3347,7 +3488,7 @@ func VirtualLock(addr uintptr, length uintptr) (err error) {
 }
 
 func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3355,7 +3496,7 @@ func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect
 }
 
 func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0)
+	r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3363,7 +3504,7 @@ func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect
 }
 
 func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+	r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3371,7 +3512,7 @@ func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintpt
 }
 
 func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3379,7 +3520,7 @@ func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformat
 }
 
 func VirtualUnlock(addr uintptr, length uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+	r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3387,13 +3528,13 @@ func VirtualUnlock(addr uintptr, length uintptr) (err error) {
 }
 
 func WTSGetActiveConsoleSessionId() (sessionID uint32) {
-	r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr())
 	sessionID = uint32(r0)
 	return
 }
 
 func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
+	r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3405,7 +3546,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
 	if waitAll {
 		_p0 = 1
 	}
-	r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
 	event = uint32(r0)
 	if event == 0xffffffff {
 		err = errnoErr(e1)
@@ -3414,7 +3555,7 @@ func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMil
 }
 
 func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+	r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
 	event = uint32(r0)
 	if event == 0xffffffff {
 		err = errnoErr(e1)
@@ -3423,7 +3564,7 @@ func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32,
 }
 
 func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
-	r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+	r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3435,7 +3576,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
 	if len(buf) > 0 {
 		_p0 = &buf[0]
 	}
-	r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+	r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3443,7 +3584,7 @@ func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped)
 }
 
 func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0)
+	r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3451,7 +3592,7 @@ func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size
 }
 
 func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
-	r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+	r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3459,12 +3600,12 @@ func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32
 }
 
 func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
-	syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+	syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
 	return
 }
 
 func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3472,7 +3613,7 @@ func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint
 }
 
 func NetApiBufferFree(buf *byte) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3480,7 +3621,7 @@ func NetApiBufferFree(buf *byte) (neterr error) {
 }
 
 func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+	r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3488,7 +3629,7 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete
 }
 
 func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+	r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3496,7 +3637,7 @@ func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, pr
 }
 
 func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
-	r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
 	if r0 != 0 {
 		neterr = syscall.Errno(r0)
 	}
@@ -3504,7 +3645,7 @@ func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **by
 }
 
 func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3512,7 +3653,7 @@ func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO
 }
 
 func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
-	r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+	r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3520,7 +3661,7 @@ func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, i
 }
 
 func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+	r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3528,7 +3669,7 @@ func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe
 }
 
 func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3536,7 +3677,7 @@ func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInf
 }
 
 func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3544,7 +3685,7 @@ func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte,
 }
 
 func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+	r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3552,7 +3693,7 @@ func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.P
 }
 
 func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+	r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3560,13 +3701,13 @@ func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoL
 }
 
 func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
-	r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+	r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
 	ret = r0 != 0
 	return
 }
 
 func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3574,13 +3715,13 @@ func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
 }
 
 func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
-	r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
 	ret = r0 != 0
 	return
 }
 
 func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3588,7 +3729,7 @@ func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFile
 }
 
 func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
-	r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3596,18 +3737,18 @@ func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString
 }
 
 func RtlGetCurrentPeb() (peb *PEB) {
-	r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
 	peb = (*PEB)(unsafe.Pointer(r0))
 	return
 }
 
 func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
-	syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+	syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
 	return
 }
 
 func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
-	r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
 	if r0 != 0 {
 		ntstatus = NTStatus(r0)
 	}
@@ -3615,23 +3756,23 @@ func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
 }
 
 func RtlInitString(destinationString *NTString, sourceString *byte) {
-	syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
 	return
 }
 
 func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
-	syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+	syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
 	return
 }
 
 func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
-	r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
 	ret = syscall.Errno(r0)
 	return
 }
 
 func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
-	r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+	r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3639,7 +3780,7 @@ func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
 }
 
 func coCreateGuid(pguid *GUID) (ret error) {
-	r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3647,7 +3788,7 @@ func coCreateGuid(pguid *GUID) (ret error) {
 }
 
 func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
-	r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3655,7 +3796,7 @@ func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable *
 }
 
 func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
-	r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+	r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3663,23 +3804,23 @@ func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
 }
 
 func CoTaskMemFree(address unsafe.Pointer) {
-	syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+	syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
 	return
 }
 
 func CoUninitialize() {
-	syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+	syscall.SyscallN(procCoUninitialize.Addr())
 	return
 }
 
 func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
-	r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+	r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
 	chars = int32(r0)
 	return
 }
 
 func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3687,7 +3828,7 @@ func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uin
 }
 
 func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3695,7 +3836,7 @@ func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *u
 }
 
 func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+	r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3703,7 +3844,7 @@ func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err
 }
 
 func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3711,7 +3852,7 @@ func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uin
 }
 
 func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3719,7 +3860,7 @@ func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size u
 }
 
 func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3727,7 +3868,7 @@ func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb
 }
 
 func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+	r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3739,7 +3880,7 @@ func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callb
 	if ret != nil {
 		return
 	}
-	r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+	r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -3751,12 +3892,12 @@ func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
 	if err != nil {
 		return
 	}
-	syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+	syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
 	return
 }
 
 func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+	r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -3764,7 +3905,7 @@ func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err er
 }
 
 func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
 	if r1&0xff == 0 {
 		err = errnoErr(e1)
 	}
@@ -3772,7 +3913,7 @@ func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint
 }
 
 func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3780,7 +3921,7 @@ func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }
 
 func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3788,7 +3929,7 @@ func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInf
 }
 
 func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3796,7 +3937,7 @@ func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
 }
 
 func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3804,7 +3945,7 @@ func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGu
 }
 
 func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3812,7 +3953,7 @@ func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSiz
 }
 
 func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
-	r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	handle = DevInfo(r0)
 	if handle == DevInfo(InvalidHandle) {
 		err = errnoErr(e1)
@@ -3821,7 +3962,7 @@ func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineN
 }
 
 func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3829,7 +3970,7 @@ func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUI
 }
 
 func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3837,7 +3978,7 @@ func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
 }
 
 func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3845,7 +3986,7 @@ func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }
 
 func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3853,7 +3994,7 @@ func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfo
 }
 
 func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3861,7 +4002,7 @@ func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, d
 }
 
 func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
-	r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
 	handle = DevInfo(r0)
 	if handle == DevInfo(InvalidHandle) {
 		err = errnoErr(e1)
@@ -3870,7 +4011,7 @@ func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintp
 }
 
 func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3878,7 +4019,7 @@ func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }
 
 func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3886,7 +4027,7 @@ func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailDa
 }
 
 func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3894,7 +4035,7 @@ func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
 }
 
 func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3902,7 +4043,7 @@ func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }
 
 func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3910,7 +4051,7 @@ func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3918,7 +4059,7 @@ func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
 }
 
 func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3926,7 +4067,7 @@ func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoDa
 }
 
 func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3934,7 +4075,7 @@ func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3942,7 +4083,7 @@ func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+	r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
 	key = Handle(r0)
 	if key == InvalidHandle {
 		err = errnoErr(e1)
@@ -3951,7 +4092,7 @@ func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Sc
 }
 
 func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3959,7 +4100,7 @@ func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfo
 }
 
 func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3967,7 +4108,7 @@ func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInf
 }
 
 func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3975,7 +4116,7 @@ func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *Dev
 }
 
 func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3983,7 +4124,7 @@ func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3991,7 +4132,7 @@ func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData
 }
 
 func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+	r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -3999,7 +4140,7 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er
 }
 
 func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
-	r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+	r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
 	argv = (**uint16)(unsafe.Pointer(r0))
 	if argv == nil {
 		err = errnoErr(e1)
@@ -4008,7 +4149,7 @@ func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
 }
 
 func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
-	r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -4016,7 +4157,7 @@ func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **u
 }
 
 func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+	r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
 	if r1 <= 32 {
 		err = errnoErr(e1)
 	}
@@ -4024,12 +4165,12 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui
 }
 
 func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
-	syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+	syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
 	return
 }
 
 func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
-	r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+	r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4037,7 +4178,7 @@ func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
 }
 
 func ExitWindowsEx(flags uint32, reason uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
+	r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4045,7 +4186,7 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) {
 }
 
 func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+	r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
 	copied = int32(r0)
 	if copied == 0 {
 		err = errnoErr(e1)
@@ -4054,33 +4195,39 @@ func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, e
 }
 
 func GetDesktopWindow() (hwnd HWND) {
-	r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
 	hwnd = HWND(r0)
 	return
 }
 
 func GetForegroundWindow() (hwnd HWND) {
-	r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
 	hwnd = HWND(r0)
 	return
 }
 
 func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+	r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+func GetKeyboardLayout(tid uint32) (hkl Handle) {
+	r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
+	hkl = Handle(r0)
+	return
+}
+
 func GetShellWindow() (shellWindow HWND) {
-	r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
 	shellWindow = HWND(r0)
 	return
 }
 
 func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
 	tid = uint32(r0)
 	if tid == 0 {
 		err = errnoErr(e1)
@@ -4089,25 +4236,34 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
 }
 
 func IsWindow(hwnd HWND) (isWindow bool) {
-	r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+	r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
 	isWindow = r0 != 0
 	return
 }
 
 func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
-	r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+	r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
 	isUnicode = r0 != 0
 	return
 }
 
 func IsWindowVisible(hwnd HWND) (isVisible bool) {
-	r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+	r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
 	isVisible = r0 != 0
 	return
 }
 
+func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
+	hkl = Handle(r0)
+	if hkl == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
-	r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
 	ret = int32(r0)
 	if ret == 0 {
 		err = errnoErr(e1)
@@ -4115,12 +4271,26 @@ func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret i
 	return
 }
 
+func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
+	r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
+	ret = int32(r0)
+	return
+}
+
+func UnloadKeyboardLayout(hkl Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) {
 	var _p0 uint32
 	if inheritExisting {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+	r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4128,7 +4298,7 @@ func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (
 }
 
 func DestroyEnvironmentBlock(block *uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4136,7 +4306,7 @@ func DestroyEnvironmentBlock(block *uint16) (err error) {
 }
 
 func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+	r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4153,7 +4323,7 @@ func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32
 }
 
 func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0)
+	r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
 	bufSize = uint32(r0)
 	if bufSize == 0 {
 		err = errnoErr(e1)
@@ -4171,7 +4341,7 @@ func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer u
 }
 
 func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4188,7 +4358,7 @@ func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer
 }
 
 func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4196,7 +4366,7 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint
 }
 
 func TimeBeginPeriod(period uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+	r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
 	if r1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -4204,7 +4374,7 @@ func TimeBeginPeriod(period uint32) (err error) {
 }
 
 func TimeEndPeriod(period uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+	r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
 	if r1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -4212,7 +4382,7 @@ func TimeEndPeriod(period uint32) (err error) {
 }
 
 func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
-	r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+	r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
 	if r0 != 0 {
 		ret = syscall.Errno(r0)
 	}
@@ -4220,12 +4390,12 @@ func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error)
 }
 
 func FreeAddrInfoW(addrinfo *AddrinfoW) {
-	syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+	syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
 	return
 }
 
 func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
-	r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+	r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
 	if r0 != 0 {
 		sockerr = syscall.Errno(r0)
 	}
@@ -4233,15 +4403,23 @@ func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, resul
 }
 
 func WSACleanup() (err error) {
-	r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+	r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
 	return
 }
 
+func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
-	r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+	r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
 	n = int32(r0)
 	if n == -1 {
 		err = errnoErr(e1)
@@ -4254,7 +4432,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
 	if wait {
 		_p0 = 1
 	}
-	r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+	r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
 	if r1 == 0 {
 		err = errnoErr(e1)
 	}
@@ -4262,7 +4440,7 @@ func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, f
 }
 
 func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
-	r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+	r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4270,7 +4448,7 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo
 }
 
 func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+	r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4278,7 +4456,7 @@ func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle)
 }
 
 func WSALookupServiceEnd(handle Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4286,7 +4464,7 @@ func WSALookupServiceEnd(handle Handle) (err error) {
 }
 
 func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
-	r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4294,7 +4472,7 @@ func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WS
 }
 
 func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
-	r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4302,7 +4480,7 @@ func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32
 }
 
 func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
-	r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4310,7 +4488,7 @@ func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *ui
 }
 
 func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
-	r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4318,7 +4496,7 @@ func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32,
 }
 
 func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
-	r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4326,7 +4504,7 @@ func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32
 }
 
 func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) {
-	r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
+	r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
 	handle = Handle(r0)
 	if handle == InvalidHandle {
 		err = errnoErr(e1)
@@ -4335,7 +4513,7 @@ func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo,
 }
 
 func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
-	r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
+	r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data)))
 	if r0 != 0 {
 		sockerr = syscall.Errno(r0)
 	}
@@ -4343,7 +4521,7 @@ func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
 }
 
 func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4351,7 +4529,7 @@ func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
 }
 
 func Closesocket(s Handle) (err error) {
-	r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
+	r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4359,7 +4537,7 @@ func Closesocket(s Handle) (err error) {
 }
 
 func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+	r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4376,7 +4554,7 @@ func GetHostByName(name string) (h *Hostent, err error) {
 }
 
 func _GetHostByName(name *byte) (h *Hostent, err error) {
-	r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name)))
 	h = (*Hostent)(unsafe.Pointer(r0))
 	if h == nil {
 		err = errnoErr(e1)
@@ -4385,7 +4563,7 @@ func _GetHostByName(name *byte) (h *Hostent, err error) {
 }
 
 func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
 	if r1 == socket_error {
 		err = errnoErr(e1)
 	}
@@ -4402,7 +4580,7 @@ func GetProtoByName(name string) (p *Protoent, err error) {
 }
 
 func _GetProtoByName(name *byte) (p *Protoent, err error) {
-	r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+	r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name)))
 	p = (*Protoent)(unsafe.Pointer(r0))
 	if p == nil {
 		err = errnoErr(e1)
@@ -4425,7 +4603,7 @@ func GetServByName(name string, proto string) (s *Servent, err
error) { } func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto))) s = (*Servent)(unsafe.Pointer(r0)) if s == nil { err = errnoErr(e1) @@ -4434,7 +4612,7 @@ func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { } func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4442,7 +4620,7 @@ func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { } func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen))) if r1 == socket_error { err = errnoErr(e1) } @@ -4450,7 +4628,7 @@ func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int3 } func listen(s Handle, backlog int32) (err error) { - r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog)) if r1 == socket_error { err = errnoErr(e1) } @@ -4458,7 +4636,7 @@ func listen(s Handle, backlog int32) (err error) { } func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort)) u = uint16(r0) return } @@ -4468,7 +4646,7 @@ func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen * if len(buf) > 0 { _p0 = &buf[0] } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int32(r0) if n == -1 { err = errnoErr(e1) @@ -4481,7 +4659,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( if len(buf) > 0 { _p0 = &buf[0] } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) + r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4489,7 +4667,7 @@ func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) ( } func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + 
r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen)) if r1 == socket_error { err = errnoErr(e1) } @@ -4497,7 +4675,7 @@ func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32 } func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how)) if r1 == socket_error { err = errnoErr(e1) } @@ -4505,7 +4683,7 @@ func shutdown(s Handle, how int32) (err error) { } func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol)) handle = Handle(r0) if handle == InvalidHandle { err = errnoErr(e1) @@ -4514,7 +4692,7 @@ func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { } func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0) + r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count))) if r1 == 0 { err = errnoErr(e1) } @@ -4522,12 +4700,12 @@ func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessio } func WTSFreeMemory(ptr uintptr) { - syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) + syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr)) return } func WTSQueryUserToken(session uint32, token *Token) (err error) { - r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) + r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token))) if r1 == 0 { err = errnoErr(e1) } diff --git a/src/nvcgo/vendor/modules.txt b/src/nvcgo/vendor/modules.txt index b005da273..c12797fc4 100644 --- a/src/nvcgo/vendor/modules.txt +++ b/src/nvcgo/vendor/modules.txt @@ -1,10 +1,18 @@ -# github.com/cilium/ebpf v0.8.0 -## explicit; go 1.16 +# github.com/cilium/ebpf v0.20.0 +## explicit; go 1.24.0 github.com/cilium/ebpf github.com/cilium/ebpf/asm +github.com/cilium/ebpf/btf github.com/cilium/ebpf/internal -github.com/cilium/ebpf/internal/btf +github.com/cilium/ebpf/internal/efw +github.com/cilium/ebpf/internal/kallsyms +github.com/cilium/ebpf/internal/kconfig +github.com/cilium/ebpf/internal/linux +github.com/cilium/ebpf/internal/platform github.com/cilium/ebpf/internal/sys +github.com/cilium/ebpf/internal/sysenc +github.com/cilium/ebpf/internal/testutils/testmain +github.com/cilium/ebpf/internal/tracefs github.com/cilium/ebpf/internal/unix github.com/cilium/ebpf/link # github.com/google/uuid v1.6.0 @@ -16,9 +24,7 @@ github.com/opencontainers/runtime-spec/specs-go # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# golang.org/x/sys v0.21.0 -## explicit; go 1.18 +# golang.org/x/sys v0.37.0 +## explicit; go 1.24.0 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 -## explicit; go 1.11