diff --git a/.github/workflows/presubmit.yml b/.github/workflows/presubmit.yml index 4bb992ea07..1d0b446459 100644 --- a/.github/workflows/presubmit.yml +++ b/.github/workflows/presubmit.yml @@ -77,3 +77,8 @@ jobs: GCM_SECRET: ${{ secrets.GCM_SECRET }} GMP_PROMETHEUS_IMAGE: "gmp-prometheus:amd64" run: make -C ./google/internal/promqle2etest test + - name: Run prw2gcm GCM tests + env: + GCM_SECRET: ${{ secrets.GCM_SECRET }} + GMP_PROMETHEUS_IMAGE: "gmp-prometheus:amd64" + run: make -C ./google/cmd/prw2gcm test diff --git a/Dockerfile.google b/Dockerfile.google index 52ce4078d7..c09d2b9cbf 100644 --- a/Dockerfile.google +++ b/Dockerfile.google @@ -81,6 +81,11 @@ RUN CGO_ENABLED=0 go build \ -ldflags="-X github.com/prometheus/common/version.Version=$(cat VERSION) \ -X github.com/prometheus/common/version.BuildDate=$(date --iso-8601=seconds)" \ ./cmd/promtool +RUN CGO_ENABLED=0 go build \ + -mod=vendor \ + -ldflags="-X github.com/prometheus/common/version.Version=$(cat VERSION) \ + -X github.com/prometheus/common/version.BuildDate=$(date --iso-8601=seconds)" \ + ./google/cmd/prw2gcm # Configure distroless base image like the upstream Prometheus image. 
# Since the directory and symlink setup needs shell access, we need yet another @@ -97,6 +102,7 @@ FROM ${IMAGE_BASE} AS app COPY --from=buildbase /workspace/prometheus /bin/prometheus COPY --from=buildbase /workspace/promtool /bin/promtool +COPY --from=buildbase /workspace/prw2gcm /bin/prw2gcm COPY --from=appbase --chown=nobody:nobody /etc/prometheus /etc/prometheus COPY --from=appbase --chown=nobody:nobody /prometheus /prometheus COPY --from=appbase /usr/share/prometheus /usr/share/prometheus diff --git a/go.mod b/go.mod index 17411f6cf9..e61696ddf4 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,7 @@ go 1.24.0 toolchain go1.24.4 require ( + cloud.google.com/go/auth v0.16.0 cloud.google.com/go/compute/metadata v0.6.0 cloud.google.com/go/monitoring v1.24.2 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 @@ -33,6 +34,7 @@ require ( github.com/go-openapi/strfmt v0.23.0 github.com/go-zookeeper/zk v1.0.3 github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.5.4 github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.7.0 github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db @@ -58,10 +60,12 @@ require ( github.com/oklog/run v1.1.0 github.com/oklog/ulid v1.3.1 github.com/ovh/go-ovh v1.5.1 + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.21.1 - github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.63.0 + github.com/prometheus/client_golang v1.23.0 + github.com/prometheus/client_golang/exp v0.0.0-20250820144310-1f3a9042681e + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.65.0 github.com/prometheus/common/assets v0.2.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/prometheus/compliance/promqle2e v0.0.0-20250417063348-1215c9d17a1a @@ -104,7 +108,6 @@ require ( ) require ( - cloud.google.com/go/auth v0.16.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect @@ -143,7 +146,6 @@ require ( github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/glog v1.2.4 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -183,9 +185,8 @@ require ( github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect diff --git a/go.sum b/go.sum index 1be894c8db..359647fb29 100644 --- a/go.sum +++ b/go.sum @@ -555,15 +555,18 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= 
+github.com/prometheus/client_golang/exp v0.0.0-20250820144310-1f3a9042681e h1:RIcfJKXvsP1EFz4EOA/Xq2Z8Bi+b0SSyA8gsHkOYoDA= +github.com/prometheus/client_golang/exp v0.0.0-20250820144310-1f3a9042681e/go.mod h1:FGJuwvfcPY0V5enm+w8zF1RNS062yugQtPPQp1c4Io4= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/assets v0.2.0 h1:0P5OrzoHrYBOSM1OigWL3mY8ZvV2N4zIE/5AahrSrfM= @@ -578,8 +581,9 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod 
@find io/prometheus/write/v2/ -type f -exec sed -i '' 's#_ "github.com/gogo/protobuf/gogoproto"##g' {} \;
+endif diff --git a/google/cmd/prw2gcm/buf.gen.yaml b/google/cmd/prw2gcm/buf.gen.yaml new file mode 100644 index 0000000000..7521d1fb48 --- /dev/null +++ b/google/cmd/prw2gcm/buf.gen.yaml @@ -0,0 +1,21 @@ +# buf.gen.yaml +version: v2 + +plugins: +- remote: buf.build/protocolbuffers/go:v1.31.0 + out: . + opt: + - Mio/prometheus/write/v2/types.proto=io/prometheus/write/v2 + +# vtproto for efficiency utilities like pooling etc. +# https://buf.build/community/planetscale-vtprotobuf?version=v0.6.0 +- remote: buf.build/community/planetscale-vtprotobuf:v0.6.0 + out: . + opt: + - Mio/prometheus/write/v2/types.proto=io/prometheus/write/v2 + - features=marshal+unmarshal+size+clone + +inputs: +- module: buf.build/prometheus/prometheus:7dec989b8d3a49e6bbe6d7221400e8f8 # (bwplotka-experiment1 label). + types: + - "io.prometheus.write.v2.Request" diff --git a/google/cmd/prw2gcm/convert.go b/google/cmd/prw2gcm/convert.go new file mode 100644 index 0000000000..1fd70185bb --- /dev/null +++ b/google/cmd/prw2gcm/convert.go @@ -0,0 +1,397 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + monitoring_pb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + timestamp_pb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/prometheus/client_golang/exp/api/remote" + writev2 "github.com/prometheus/prometheus/google/cmd/prw2gcm/io/prometheus/write/v2" + "github.com/prometheus/prometheus/model/value" + distribution_pb "google.golang.org/genproto/googleapis/api/distribution" + metric_pb "google.golang.org/genproto/googleapis/api/metric" + monitoredres_pb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// The target label keys used for the Prometheus monitored resource. +const ( + KeyProjectID = "project_id" + KeyLocation = "location" + KeyCluster = "cluster" + KeyNamespace = "namespace" + KeyJob = "job" + KeyInstance = "instance" + + // Maximum number of labels allowed on GCM series. + maxLabelCount = 100 + + metricTypePrefix = "prometheus.googleapis.com" +) + +type gcmQueue interface { + Enqueue(ts *monitoring_pb.TimeSeries) + Flush() +} + +// Convert converts Remote Write 2.x series and enqueue them as GCM v3 time series. +// TODO: Given this proxy is meant to be mostly for testing or temporary use, the implementation is limited. Consider extending if needed: +// * Support for classic histograms (accumulation needed, but possible). +// +// Other limitations, not planned to be fixed here for now: +// * No support for no CT for cumulative. +// For the general GCM implementation, those limitations might need to be handled. +func Convert(ctx context.Context, r *writev2.Request, q gcmQueue) (stats remote.WriteResponseStats, retErr error) { + initialSeries := make([]*monitoring_pb.TimeSeries, 0, len(r.Timeseries)) + initialSeriesTsIndex := make([]int, 0, len(r.Timeseries)) + maxPoints := 0 + + // Do initial pass for initial series to send. 
+ for i, ts := range r.Timeseries { + if ctx.Err() != nil { + retErr = httpErrJoin(retErr, ctx.Err()) + return stats, retErr + } + + exportTS, points, err := initialTimeSeriesConvert(ts, r.Symbols) + if err != nil { + retErr = httpErrJoin(retErr, fmt.Errorf("series %v: initial conversion failed; series skipped: %w", ts.LabelsToString(r.Symbols), err)) + continue + } + if maxPoints < points { + maxPoints = points + } + initialSeries = append(initialSeries, exportTS) + initialSeriesTsIndex = append(initialSeriesTsIndex, i) + } + + // Go through samples. We have to do it over sample dimension, not series, given the + // GCM CreateTimeSeries API constraint of having one point for a single timeseries per whole request. + // NOTE: Prometheus currently only sends 1 sample per request per timeseries, but we are implementing the protocol here + // which supports future extensions (buffering) on Prometheus ecosystem side. + for p := 0; p < maxPoints; p++ { + for i, exportTS := range initialSeries { + if ctx.Err() != nil { + retErr = httpErrJoin(retErr, ctx.Err()) + return stats, retErr + } + + ts := r.Timeseries[initialSeriesTsIndex[i]] + s, point, err := convertTimeSeriesPoint(exportTS, ts, p) + if err != nil { + retErr = httpErrJoin(retErr, fmt.Errorf("series %v: point conversion failed; series skipped: %w", ts.LabelsToString(r.Symbols), err)) + continue + } + if point == nil { + continue + } + + // GCM supports single sample point only, reuse some parts, but generally send another timeseries. + // Shallow copy is enough (we care about the points being different only). + exportTSCopy := *exportTS + exportTSCopy.Points = []*monitoring_pb.Point{point} + q.Enqueue(&exportTSCopy) + + // We can't really tell the true "written" number in this proxy (this could + // be done internally on GCM ingestion one day). Notably, even if we batch + // fails, it could be whole batch or one sample per batch. Assume scheduled export + // a "written" event. 
+ stats.Samples += s.Samples + stats.Histograms += s.Histograms + stats.Exemplars += s.Exemplars + } + // We have to flush now, so a new GCM request is ensured. + q.Flush() + } + return stats, retErr +} + +func initialTimeSeriesConvert(ts *writev2.TimeSeries, symbols []string) (_ *monitoring_pb.TimeSeries, points int, _ error) { + meta := ts.GetMetadata() + if meta == nil { + return nil, 0, newHTTPErrorf(http.StatusBadRequest, "metadata is required") + } + exportTS, err := initTSFromLabels(meta, ts.LabelsRefs, symbols) + if err != nil { + return nil, 0, err + } + // Reject classic histograms for now. It's implementable (best-effort), TBD later on in GCM only. + if meta.GetType() == writev2.Metadata_METRIC_TYPE_HISTOGRAM && len(ts.Samples) > 0 { + // Classic histogram detected, currently, this implementation requires "self-contained-histograms" (nhcb), + // reject classic ones. It's implementable (best-effort), TBD later on in GCM only. + // See: https://docs.google.com/document/d/1mpcSWH1B82q-BtJza-eJ8xMLlKt6EJ9oFGH325vtY1Q/edit + return nil, 0, newHTTPErrorf(http.StatusBadRequest, "classic histograms are not supported; use nhcb instead") + } + if len(ts.Samples) > 0 && len(ts.Histograms) > 0 { + return nil, 0, newHTTPErrorf(http.StatusBadRequest, "both samples and histogram samples provided; forbidden in PRW 2.x") + } + switch ts.GetMetadata().GetType() { + case writev2.Metadata_METRIC_TYPE_HISTOGRAM, writev2.Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + // Process native histogram samples. + if len(ts.Histograms) == 0 { + return nil, 0, newHTTPErrorf(http.StatusBadRequest, "no histogram sample provided for histogram type metric") + } + exportTS.ValueType = metric_pb.MetricDescriptor_DISTRIBUTION + points = len(ts.Histograms) + default: + // Process float samples. 
+ if len(ts.Histograms) <= point { + return stats, nil, nil + } + h := ts.Histograms[point] + // TODO: Skip staleness markers. + if exportTS.GetMetricKind() == metric_pb.MetricDescriptor_CUMULATIVE && h.CreatedTimestamp == 0 { + return stats, nil, newHTTPErrorf(http.StatusBadRequest, "created timestamp is required for every cumulative metric") + } + d, err := histogramSampleToDistribution(h) + if err != nil { + return stats, nil, err + } + stats.Histograms++ + // TODO(bwplotka): Exemplars. + return stats, &monitoring_pb.Point{ + Interval: &monitoring_pb.TimeInterval{ + StartTime: getTimestamp(h.CreatedTimestamp), + EndTime: getTimestamp(h.Timestamp), + }, + Value: &monitoring_pb.TypedValue{ + Value: &monitoring_pb.TypedValue_DistributionValue{DistributionValue: d}, + }, + }, nil + default: + if len(ts.Samples) <= point {
+ return stats, &monitoring_pb.Point{ + Interval: &monitoring_pb.TimeInterval{ + StartTime: getTimestamp(s.CreatedTimestamp), + EndTime: getTimestamp(s.Timestamp), + }, + Value: &monitoring_pb.TypedValue{ + Value: &monitoring_pb.TypedValue_DoubleValue{DoubleValue: s.Value}, + }, + }, nil + } +} + +func initTSFromLabels(meta *writev2.Metadata, labelsRefs []uint32, symbols []string) (*monitoring_pb.TimeSeries, error) { + name := "" + resLabels := map[string]string{} + metricLabels := map[string]string{} + + // Remote Write contains all labels in one sorted, interned array. + // Validate if we have all labels required for the resource. + // TODO(bwplotka): Check len(labelRefs) mod 2, etc + for i := 0; i < len(labelsRefs); i += 2 { + n := symbols[labelsRefs[i]] + v := symbols[labelsRefs[i+1]] + + switch n { + case "__name__": + if v == "" { + return nil, newHTTPErrorf(http.StatusBadRequest, "empty metric name (__name__) label") + } + name = v + case KeyProjectID, KeyLocation, KeyCluster, KeyNamespace, KeyJob, KeyInstance: + if v == "" { + continue + } + resLabels[n] = v + default: + metricLabels[n] = v + } + } + if name == "" { + return nil, newHTTPErrorf(http.StatusBadRequest, "no metric name (__name__) label found") + } + if len(metricLabels) > maxLabelCount { + // TODO: Is the field limit is lifted in the GCM API already? + return nil, newHTTPErrorf(http.StatusBadRequest, "metric labels exceed the limit of %d; got %v", maxLabelCount, len(metricLabels)) + } + + descriptor, kind, err := describeMetric(name, meta.GetType()) + if err != nil { + return nil, err + } + res := &monitoredres_pb.MonitoredResource{ + Type: "prometheus_target", + Labels: map[string]string{ + // Ensure all required labels are set (even if empty), otherwise GCM request + // will fail. Empty string is a valid value in GCM and not the same as being unset. + // NOTE: We could consider mode of the proxy where we validate those. Ignore for now. 
+ return &timestamp_pb.Timestamp{
+func describeMetric(name string, typ writev2.Metadata_MetricType) ( + descriptor string, + kind metric_pb.MetricDescriptor_MetricKind, + _ error, +) { + suffix := gcmMetricSuffixNone + extraSuffix := gcmMetricSuffixNone + + switch typ { + case writev2.Metadata_METRIC_TYPE_COUNTER: + suffix = gcmMetricSuffixCounter + kind = metric_pb.MetricDescriptor_CUMULATIVE + case writev2.Metadata_METRIC_TYPE_GAUGE: + suffix = gcmMetricSuffixGauge + kind = metric_pb.MetricDescriptor_GAUGE + case writev2.Metadata_METRIC_TYPE_HISTOGRAM: + suffix = gcmMetricSuffixHistogram + kind = metric_pb.MetricDescriptor_CUMULATIVE + case writev2.Metadata_METRIC_TYPE_GAUGEHISTOGRAM: + // TODO: Check if it's safe to do (new behaviour vs export pkg). + suffix = gcmMetricSuffixHistogram + kind = metric_pb.MetricDescriptor_GAUGE + case writev2.Metadata_METRIC_TYPE_SUMMARY: + switch ms := getMetricSuffix(name); ms { + case metricSuffixSum: + suffix = gcmMetricSuffixSummary + extraSuffix = gcmMetricSuffixCounter + kind = metric_pb.MetricDescriptor_CUMULATIVE + case metricSuffixCount: + suffix = gcmMetricSuffixSummary + extraSuffix = gcmMetricSuffixNone + kind = metric_pb.MetricDescriptor_CUMULATIVE + case metricSuffixNone: // Actual quantiles. + suffix = gcmMetricSuffixSummary + extraSuffix = gcmMetricSuffixNone + kind = metric_pb.MetricDescriptor_GAUGE + default: + return "", kind, newHTTPErrorf(http.StatusBadRequest, "unknown summary series suffix %v", ms) + } + case writev2.Metadata_METRIC_TYPE_INFO, writev2.Metadata_METRIC_TYPE_STATESET: + // TODO: Check if it's safe to do (new behaviour vs export pkg). 
+ suffix = gcmMetricSuffixGauge + kind = metric_pb.MetricDescriptor_GAUGE + case writev2.Metadata_METRIC_TYPE_UNSPECIFIED: + fallthrough + default: + return "", kind, newHTTPErrorf(http.StatusBadRequest, "unknown metric type %v", typ) + } + if extraSuffix == gcmMetricSuffixNone { + return fmt.Sprintf("%s/%s/%s", metricTypePrefix, name, suffix), kind, nil + } + return fmt.Sprintf("%s/%s/%s:%s", metricTypePrefix, name, suffix, extraSuffix), kind, nil +} + +func getMetricSuffix(name string) metricSuffix { + if strings.HasSuffix(name, string(metricSuffixTotal)) { + return metricSuffixTotal + } + if strings.HasSuffix(name, string(metricSuffixBucket)) { + return metricSuffixBucket + } + if strings.HasSuffix(name, string(metricSuffixCount)) { + return metricSuffixCount + } + if strings.HasSuffix(name, string(metricSuffixSum)) { + return metricSuffixSum + } + return metricSuffixNone +} + +// Metric name suffixes used by various Prometheus metric types. +type metricSuffix string + +const ( + metricSuffixNone metricSuffix = "" + metricSuffixTotal metricSuffix = "_total" + metricSuffixBucket metricSuffix = "_bucket" + metricSuffixSum metricSuffix = "_sum" + metricSuffixCount metricSuffix = "_count" +) + +// Suffixes appended to GCM metric types. They are equivalent to the respective +// Prometheus types but we redefine them here to ensure they don't unexpectedly change +// by updating a Prometheus library. 
+type gcmMetricSuffix string + +const ( + gcmMetricSuffixNone gcmMetricSuffix = "" + gcmMetricSuffixUnknown gcmMetricSuffix = "unknown" + gcmMetricSuffixGauge gcmMetricSuffix = "gauge" + gcmMetricSuffixCounter gcmMetricSuffix = "counter" + gcmMetricSuffixHistogram gcmMetricSuffix = "histogram" + gcmMetricSuffixSummary gcmMetricSuffix = "summary" +) diff --git a/google/cmd/prw2gcm/http_error.go b/google/cmd/prw2gcm/http_error.go new file mode 100644 index 0000000000..1c58a08a10 --- /dev/null +++ b/google/cmd/prw2gcm/http_error.go @@ -0,0 +1,116 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "errors" + "fmt" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ error = httpError{} + +type httpError struct { + error + code int +} + +func (e httpError) HTTPCode() int { + if e.error == nil || e.code == 0 { + return http.StatusInternalServerError + } + return e.code +} + +func newHTTPError(code int, str string) error { + return &httpError{ + error: errors.New(str), code: code, + } +} + +func newHTTPErrorf(code int, str string, v ...any) error { + return &httpError{ + error: fmt.Errorf(str, v...), code: code, + } +} + +func newHTTPErrorFromGRPC(err error) error { + code := http.StatusInternalServerError + // TODO: Add support for more codes, especially those that shouldn't be retried. 
+ switch status.Code(err) { + case codes.OK: + return nil + case codes.Unauthenticated: + code = http.StatusUnauthorized + case codes.InvalidArgument: + code = http.StatusBadRequest + default: + } + return &httpError{ + error: err, code: code, + } +} + +func httpCodeFromError(err error) (int, bool) { + if err == nil { + return 0, false + } + type httpCode interface{ HTTPCode() int } + + if hc, ok := err.(httpCode); ok { + return hc.HTTPCode(), true + } + var hc httpCode + if errors.As(err, &hc) { + return hc.HTTPCode(), true + } + return 0, false +} + +func httpCodeFromErrorOr500(err error) int { + if c, ok := httpCodeFromError(err); ok { + return c + } + return http.StatusInternalServerError +} + +// TODO: There's likely a way to aggregate HTTP codes on read time with errors.Join. +func httpErrJoin(e ...error) error { + var ( + retCode int + retErr error + ) + for _, err := range e { + if err == nil { + continue + } + if c, ok := httpCodeFromError(err); ok { + if retCode < c { + retCode = c + } + } + retErr = errors.Join(retErr, err) + } + if retErr == nil { + return nil + } + if retCode > 0 { + return newHTTPError(retCode, retErr.Error()) + } + return retErr +} diff --git a/google/cmd/prw2gcm/io/prometheus/write/v2/codec.go b/google/cmd/prw2gcm/io/prometheus/write/v2/codec.go new file mode 100644 index 0000000000..81e7b4dfe0 --- /dev/null +++ b/google/cmd/prw2gcm/io/prometheus/write/v2/codec.go @@ -0,0 +1,59 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +import ( + "fmt" + "strings" +) + +// LabelsToString return labels into PromQL-like readable string. +func (m *TimeSeries) LabelsToString(symbols []string) string { + var ( + name string + lbls []string + ) + for i := 0; i < len(m.LabelsRefs); i += 2 { + n, v := symbols[m.LabelsRefs[i]], symbols[m.LabelsRefs[i+1]] + if n == "__name__" { + name = v + continue + } + // TODO: Quote UTF-8. + lbls = append(lbls, fmt.Sprintf("%v=%q", n, v)) + } + + b := strings.Builder{} + if name != "" { + b.WriteString(name) + } + b.WriteString("{") + b.WriteString(strings.Join(lbls, ", ")) + b.WriteString("}") + return b.String() +} diff --git a/google/cmd/prw2gcm/io/prometheus/write/v2/symbols.go b/google/cmd/prw2gcm/io/prometheus/write/v2/symbols.go new file mode 100644 index 0000000000..fc5e1baa36 --- /dev/null +++ b/google/cmd/prw2gcm/io/prometheus/write/v2/symbols.go @@ -0,0 +1,73 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package writev2 + +// SymbolsTable implements table for easy symbol use. +type SymbolsTable struct { + strings []string + symbolsMap map[string]uint32 +} + +// NewSymbolTable returns a symbol table. +func NewSymbolTable() SymbolsTable { + return SymbolsTable{ + // Empty string is required as a first element. + symbolsMap: map[string]uint32{"": 0}, + strings: []string{""}, + } +} + +// Symbolize adds (if not added before) a string to the symbols table, +// while returning its reference number. +func (t *SymbolsTable) Symbolize(str string) uint32 { + if ref, ok := t.symbolsMap[str]; ok { + return ref + } + ref := uint32(len(t.strings)) + t.strings = append(t.strings, str) + t.symbolsMap[str] = ref + return ref +} + +// Symbols returns computes symbols table to put in e.g. Request.Symbols. +// As per spec, order does not matter. +func (t *SymbolsTable) Symbols() []string { + return t.strings +} + +// Reset clears symbols table. 
+func (t *SymbolsTable) Reset() { + // NOTE: Make sure to keep empty symbol. + t.strings = t.strings[:1] + for k := range t.symbolsMap { + if k == "" { + continue + } + delete(t.symbolsMap, k) + } +} diff --git a/google/cmd/prw2gcm/io/prometheus/write/v2/symbols_test.go b/google/cmd/prw2gcm/io/prometheus/write/v2/symbols_test.go new file mode 100644 index 0000000000..ae082e3e13 --- /dev/null +++ b/google/cmd/prw2gcm/io/prometheus/write/v2/symbols_test.go @@ -0,0 +1,60 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package writev2 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSymbolsTable(t *testing.T) { + s := NewSymbolTable() + require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist") + require.Equal(t, uint32(0), s.Symbolize("")) + require.Equal(t, []string{""}, s.Symbols()) + + require.Equal(t, uint32(1), s.Symbolize("abc")) + require.Equal(t, []string{"", "abc"}, s.Symbols()) + + require.Equal(t, uint32(2), s.Symbolize("__name__")) + require.Equal(t, []string{"", "abc", "__name__"}, s.Symbols()) + + require.Equal(t, uint32(3), s.Symbolize("foo")) + require.Equal(t, []string{"", "abc", "__name__", "foo"}, s.Symbols()) + + s.Reset() + require.Equal(t, []string{""}, s.Symbols(), "required empty reference does not exist") + require.Equal(t, uint32(0), s.Symbolize("")) + + require.Equal(t, uint32(1), s.Symbolize("__name__")) + require.Equal(t, []string{"", "__name__"}, s.Symbols()) + + require.Equal(t, uint32(2), s.Symbolize("abc")) + require.Equal(t, []string{"", "__name__", "abc"}, s.Symbols()) +} diff --git a/google/cmd/prw2gcm/io/prometheus/write/v2/types.pb.go b/google/cmd/prw2gcm/io/prometheus/write/v2/types.pb.go new file mode 100644 index 0000000000..3341b7d746 --- /dev/null +++ b/google/cmd/prw2gcm/io/prometheus/write/v2/types.pb.go @@ -0,0 +1,1210 @@ +// Copyright 2024 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// NOTE: This file is also available on https://buf.build/prometheus/prometheus/docs/main:io.prometheus.write.v2 + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Metadata_MetricType int32 + +const ( + Metadata_METRIC_TYPE_UNSPECIFIED Metadata_MetricType = 0 + Metadata_METRIC_TYPE_COUNTER Metadata_MetricType = 1 + Metadata_METRIC_TYPE_GAUGE Metadata_MetricType = 2 + Metadata_METRIC_TYPE_HISTOGRAM Metadata_MetricType = 3 + Metadata_METRIC_TYPE_GAUGEHISTOGRAM Metadata_MetricType = 4 + Metadata_METRIC_TYPE_SUMMARY Metadata_MetricType = 5 + Metadata_METRIC_TYPE_INFO Metadata_MetricType = 6 + Metadata_METRIC_TYPE_STATESET Metadata_MetricType = 7 +) + +// Enum value maps for Metadata_MetricType. 
+var ( + Metadata_MetricType_name = map[int32]string{ + 0: "METRIC_TYPE_UNSPECIFIED", + 1: "METRIC_TYPE_COUNTER", + 2: "METRIC_TYPE_GAUGE", + 3: "METRIC_TYPE_HISTOGRAM", + 4: "METRIC_TYPE_GAUGEHISTOGRAM", + 5: "METRIC_TYPE_SUMMARY", + 6: "METRIC_TYPE_INFO", + 7: "METRIC_TYPE_STATESET", + } + Metadata_MetricType_value = map[string]int32{ + "METRIC_TYPE_UNSPECIFIED": 0, + "METRIC_TYPE_COUNTER": 1, + "METRIC_TYPE_GAUGE": 2, + "METRIC_TYPE_HISTOGRAM": 3, + "METRIC_TYPE_GAUGEHISTOGRAM": 4, + "METRIC_TYPE_SUMMARY": 5, + "METRIC_TYPE_INFO": 6, + "METRIC_TYPE_STATESET": 7, + } +) + +func (x Metadata_MetricType) Enum() *Metadata_MetricType { + p := new(Metadata_MetricType) + *p = x + return p +} + +func (x Metadata_MetricType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Metadata_MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_write_v2_types_proto_enumTypes[0].Descriptor() +} + +func (Metadata_MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_write_v2_types_proto_enumTypes[0] +} + +func (x Metadata_MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Metadata_MetricType.Descriptor instead. +func (Metadata_MetricType) EnumDescriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{4, 0} +} + +type Histogram_ResetHint int32 + +const ( + Histogram_RESET_HINT_UNSPECIFIED Histogram_ResetHint = 0 // Need to test for a counter reset explicitly. + Histogram_RESET_HINT_YES Histogram_ResetHint = 1 // This is the 1st histogram after a counter reset. + Histogram_RESET_HINT_NO Histogram_ResetHint = 2 // There was no counter reset between this and the previous Histogram. + Histogram_RESET_HINT_GAUGE Histogram_ResetHint = 3 // This is a gauge histogram where counter resets don't happen. +) + +// Enum value maps for Histogram_ResetHint. 
+var ( + Histogram_ResetHint_name = map[int32]string{ + 0: "RESET_HINT_UNSPECIFIED", + 1: "RESET_HINT_YES", + 2: "RESET_HINT_NO", + 3: "RESET_HINT_GAUGE", + } + Histogram_ResetHint_value = map[string]int32{ + "RESET_HINT_UNSPECIFIED": 0, + "RESET_HINT_YES": 1, + "RESET_HINT_NO": 2, + "RESET_HINT_GAUGE": 3, + } +) + +func (x Histogram_ResetHint) Enum() *Histogram_ResetHint { + p := new(Histogram_ResetHint) + *p = x + return p +} + +func (x Histogram_ResetHint) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Histogram_ResetHint) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_write_v2_types_proto_enumTypes[1].Descriptor() +} + +func (Histogram_ResetHint) Type() protoreflect.EnumType { + return &file_io_prometheus_write_v2_types_proto_enumTypes[1] +} + +func (x Histogram_ResetHint) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Histogram_ResetHint.Descriptor instead. +func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{5, 0} +} + +// Request represents a request to write the given timeseries to a remote destination. +// This message was introduced in the Remote Write 2.0 specification: +// https://prometheus.io/docs/concepts/remote_write_spec_2_0/ +// +// The canonical Content-Type request header value for this message is +// "application/x-protobuf;proto=io.prometheus.write.v2.Request" +// +// Version: v2.0-rc.4 +// +// NOTE: gogoproto options might change in future for this file, they +// are not part of the spec proto (they only modify the generated Go code, not +// the serialized message). 
See: https://github.com/prometheus/prometheus/issues/11908 +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // symbols contains a de-duplicated array of string elements used for various + // items in a Request message, like labels and metadata items. For the sender's convenience + // around empty values for optional fields like unit_ref, symbols array MUST start with + // empty string. + // + // To decode each of the symbolized strings, referenced, by "ref(s)" suffix, you + // need to lookup the actual string by index from symbols array. The order of + // strings is up to the sender. The receiver should not assume any particular encoding. + Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"` + // timeseries represents an array of distinct series with 0 or more samples. + Timeseries []*TimeSeries `protobuf:"bytes,5,rep,name=timeseries,proto3" json:"timeseries,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetSymbols() []string { + if x != nil { + return x.Symbols + } + return nil +} + +func (x *Request) GetTimeseries() []*TimeSeries { + if x != nil { + return x.Timeseries + } + return nil +} + +// TimeSeries represents a single series. +type TimeSeries struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // labels_refs is a list of label name-value pair references, encoded + // as indices to the Request.symbols array. This list's length is always + // a multiple of two, and the underlying labels should be sorted lexicographically. + // + // Note that there might be multiple TimeSeries objects in the same + // Requests with the same labels e.g. for different exemplars, metadata + // or created timestamp. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // Timeseries messages can either specify samples or (native) histogram samples + // (histogram field), but not both. For a typical sender (real-time metric + // streaming), in healthy cases, there will be only one sample or histogram. + // + // Samples and histograms are sorted by timestamp (older first). + Samples []*Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples,omitempty"` + Histograms []*Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms,omitempty"` + // exemplars represents an optional set of exemplars attached to this series' samples. + Exemplars []*Exemplar `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + // metadata represents the metadata associated with the given series' samples. 
+ Metadata *Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *TimeSeries) Reset() { + *x = TimeSeries{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TimeSeries) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSeries) ProtoMessage() {} + +func (x *TimeSeries) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead. +func (*TimeSeries) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{1} +} + +func (x *TimeSeries) GetLabelsRefs() []uint32 { + if x != nil { + return x.LabelsRefs + } + return nil +} + +func (x *TimeSeries) GetSamples() []*Sample { + if x != nil { + return x.Samples + } + return nil +} + +func (x *TimeSeries) GetHistograms() []*Histogram { + if x != nil { + return x.Histograms + } + return nil +} + +func (x *TimeSeries) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +func (x *TimeSeries) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// Exemplar is an additional information attached to some series' samples. +// It is typically used to attach an example trace or request ID associated with +// the metric changes. +type Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // labels_refs is an optional list of label name-value pair references, encoded + // as indices to the Request.symbols array. 
This list's len is always + // a multiple of 2, and the underlying labels should be sorted lexicographically. + // If the exemplar references a trace it should use the `trace_id` label name, as a best practice. + LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"` + // value represents an exact example value. This can be useful when the exemplar + // is attached to a histogram, which only gives an estimated value through buckets. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents the timestamp of the exemplar in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{2} +} + +func (x *Exemplar) GetLabelsRefs() []uint32 { + if x != nil { + return x.LabelsRefs + } + return nil +} + +func (x *Exemplar) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Exemplar) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +// Sample represents series sample. +type Sample struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // value of the sample. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // created_timestamp represents an optional created timestamp for the sample, + // in ms format. This information is typically used for counter, histogram (cumulative) + // or delta type metrics. + // + // For cumulative metrics, the created timestamp represents the time when the + // counter started counting (sometimes referred to as start timestamp), which + // can increase the accuracy of certain processing and query semantics (e.g. rates). + // + // Note that some receivers might require created timestamps for certain metric + // types; rejecting such samples within the Request as a result. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to efficiency and consistency. + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. 
+ CreatedTimestamp int64 `protobuf:"varint,3,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` +} + +func (x *Sample) Reset() { + *x = Sample{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Sample) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sample) ProtoMessage() {} + +func (x *Sample) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Sample.ProtoReflect.Descriptor instead. +func (*Sample) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{3} +} + +func (x *Sample) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *Sample) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *Sample) GetCreatedTimestamp() int64 { + if x != nil { + return x.CreatedTimestamp + } + return 0 +} + +// Metadata represents the metadata associated with the given series' samples. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Metadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=io.prometheus.write.v2.Metadata_MetricType" json:"type,omitempty"` + // help_ref is a reference to the Request.symbols array representing help + // text for the metric. Help is optional, reference should point to an empty string in + // such a case. 
+ HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"` + // unit_ref is a reference to the Request.symbols array representing a unit + // for the metric. Unit is optional, reference should point to an empty string in + // such a case. + UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{4} +} + +func (x *Metadata) GetType() Metadata_MetricType { + if x != nil { + return x.Type + } + return Metadata_METRIC_TYPE_UNSPECIFIED +} + +func (x *Metadata) GetHelpRef() uint32 { + if x != nil { + return x.HelpRef + } + return 0 +} + +func (x *Metadata) GetUnitRef() uint32 { + if x != nil { + return x.UnitRef + } + return 0 +} + +// A native histogram, also known as a sparse histogram. +// Original design doc: +// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit +// The appendix of this design doc also explains the concept of float +// histograms. This Histogram message can represent both, the usual +// integer histogram as well as a float histogram. 
+type Histogram struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Count: + // + // *Histogram_CountInt + // *Histogram_CountFloat + Count isHistogram_Count `protobuf_oneof:"count"` + Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"` // Sum of observations in the histogram. + // The schema defines the bucket schema. Currently, valid numbers + // are -53 and numbers in range of -4 <= n <= 8. More valid numbers might be + // added in future for new bucketing layouts. + // + // The schema equal to -53 means custom buckets. See + // custom_values field description for more details. + // + // Values between -4 and 8 represent base-2 bucket schema, where 1 + // is a bucket boundary in each case, and then each power of two is + // divided into 2^n (n is schema value) logarithmic buckets. Or in other words, + // each bucket boundary is the previous boundary times 2^(2^-n). + Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"` + ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + // Types that are assignable to ZeroCount: + // + // *Histogram_ZeroCountInt + // *Histogram_ZeroCountFloat + ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"` + // Negative Buckets. + NegativeSpans []*BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans,omitempty"` + // Use either "negative_deltas" or "negative_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). 
+ NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"` // Absolute count of each bucket. + // Positive Buckets. + // + // In case of custom buckets (-53 schema value) the positive buckets are interpreted as follows: + // * The span offset+length points to an the index of the custom_values array + // or +Inf if pointing to the len of the array. + // * The counts and deltas have the same meaning as for exponential histograms. + PositiveSpans []*BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans,omitempty"` + // Use either "positive_deltas" or "positive_counts", the former for + // regular histograms with integer counts, the latter for + // float histograms. + PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"` // Absolute count of each bucket. + ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=io.prometheus.write.v2.Histogram_ResetHint" json:"reset_hint,omitempty"` + // timestamp represents timestamp of the sample in ms. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + Timestamp int64 `protobuf:"varint,15,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // custom_values is an additional field used by non-exponential bucketing layouts. + // + // For custom buckets (-53 schema value) custom_values specify monotonically + // increasing upper inclusive boundaries for the bucket counts with arbitrary + // widths for this histogram. 
In other words, custom_values represents custom, + // explicit bucketing that could have been converted from the classic histograms. + // + // Those bounds are then referenced by spans in positive_spans with corresponding positive + // counts of deltas (refer to positive_spans for more details). This way we can + // have encode sparse histograms with custom bucketing (many buckets are often + // not used). + // + // Note that for custom bounds, even negative observations are placed in the positive + // counts to simplify the implementation and avoid ambiguity of where to place + // an underflow bucket, e.g. (-2, 1]. Therefore negative buckets and + // the zero bucket are unused, if the schema indicates custom bucketing. + // + // For each upper boundary the previous boundary represent the lower exclusive + // boundary for that bucket. The first element is the upper inclusive boundary + // for the first bucket, which implicitly has a lower inclusive bound of -Inf. + // This is similar to "le" label semantics on classic histograms. You may add a + // bucket with an upper bound of 0 to make sure that you really have no negative + // observations, but in practice, native histogram rendering will show both with + // or without first upper boundary 0 and no negative counts as the same case. + // + // The last element is not only the upper inclusive bound of the last regular + // bucket, but implicitly the lower exclusive bound of the +Inf bucket. + CustomValues []float64 `protobuf:"fixed64,16,rep,packed,name=custom_values,json=customValues,proto3" json:"custom_values,omitempty"` + // created_timestamp represents an optional created timestamp for the histogram sample, + // in ms format. The created timestamp represents the time when the + // histogram started counting (sometimes referred to as start timestamp), which + // can increase the accuracy of certain processing and query semantics (e.g. rates). 
+ // + // Note that some receivers might require created timestamps for certain metric + // types; rejecting such samples within the Request as a result. + // + // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go + // for conversion from/to time.Time to Prometheus timestamp. + // + // Note that the "optional" keyword is omitted due to efficiency and consistency. + // Zero value means value not set. If you need to use exactly zero value for + // the timestamp, use 1 millisecond before or after. + CreatedTimestamp int64 `protobuf:"varint,17,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` +} + +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. 
+func (*Histogram) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{5} +} + +func (m *Histogram) GetCount() isHistogram_Count { + if m != nil { + return m.Count + } + return nil +} + +func (x *Histogram) GetCountInt() uint64 { + if x, ok := x.GetCount().(*Histogram_CountInt); ok { + return x.CountInt + } + return 0 +} + +func (x *Histogram) GetCountFloat() float64 { + if x, ok := x.GetCount().(*Histogram_CountFloat); ok { + return x.CountFloat + } + return 0 +} + +func (x *Histogram) GetSum() float64 { + if x != nil { + return x.Sum + } + return 0 +} + +func (x *Histogram) GetSchema() int32 { + if x != nil { + return x.Schema + } + return 0 +} + +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil { + return x.ZeroThreshold + } + return 0 +} + +func (m *Histogram) GetZeroCount() isHistogram_ZeroCount { + if m != nil { + return m.ZeroCount + } + return nil +} + +func (x *Histogram) GetZeroCountInt() uint64 { + if x, ok := x.GetZeroCount().(*Histogram_ZeroCountInt); ok { + return x.ZeroCountInt + } + return 0 +} + +func (x *Histogram) GetZeroCountFloat() float64 { + if x, ok := x.GetZeroCount().(*Histogram_ZeroCountFloat); ok { + return x.ZeroCountFloat + } + return 0 +} + +func (x *Histogram) GetNegativeSpans() []*BucketSpan { + if x != nil { + return x.NegativeSpans + } + return nil +} + +func (x *Histogram) GetNegativeDeltas() []int64 { + if x != nil { + return x.NegativeDeltas + } + return nil +} + +func (x *Histogram) GetNegativeCounts() []float64 { + if x != nil { + return x.NegativeCounts + } + return nil +} + +func (x *Histogram) GetPositiveSpans() []*BucketSpan { + if x != nil { + return x.PositiveSpans + } + return nil +} + +func (x *Histogram) GetPositiveDeltas() []int64 { + if x != nil { + return x.PositiveDeltas + } + return nil +} + +func (x *Histogram) GetPositiveCounts() []float64 { + if x != nil { + return x.PositiveCounts + } + return nil +} + +func (x *Histogram) GetResetHint() 
Histogram_ResetHint { + if x != nil { + return x.ResetHint + } + return Histogram_RESET_HINT_UNSPECIFIED +} + +func (x *Histogram) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *Histogram) GetCustomValues() []float64 { + if x != nil { + return x.CustomValues + } + return nil +} + +func (x *Histogram) GetCreatedTimestamp() int64 { + if x != nil { + return x.CreatedTimestamp + } + return 0 +} + +type isHistogram_Count interface { + isHistogram_Count() +} + +type Histogram_CountInt struct { + CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"` +} + +type Histogram_CountFloat struct { + CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"` +} + +func (*Histogram_CountInt) isHistogram_Count() {} + +func (*Histogram_CountFloat) isHistogram_Count() {} + +type isHistogram_ZeroCount interface { + isHistogram_ZeroCount() +} + +type Histogram_ZeroCountInt struct { + ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"` +} + +type Histogram_ZeroCountFloat struct { + ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"` +} + +func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {} + +func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {} + +// A BucketSpan defines a number of consecutive buckets with their +// offset. Logically, it would be more straightforward to include the +// bucket counts in the Span. However, the protobuf representation is +// more compact in the way the data is structured here (with all the +// buckets in a single array separate from the Spans). +type BucketSpan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). 
+ Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` // Length of consecutive buckets. +} + +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_write_v2_types_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. +func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_write_v2_types_proto_rawDescGZIP(), []int{6} +} + +func (x *BucketSpan) GetOffset() int32 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *BucketSpan) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +var File_io_prometheus_write_v2_types_proto protoreflect.FileDescriptor + +var file_io_prometheus_write_v2_types_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x14, 0x67, 0x6f, + 0x67, 0x6f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x67, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x18, 0x04, 0x20, 
0x03, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x12, 0x48, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, + 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x04, 0x22, 0xc6, 0x02, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, + 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x52, 0x65, 0x66, 0x73, 0x12, 0x3e, 0x0a, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x07, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0a, 0x68, 0x69, 0x73, 0x74, 0x6f, + 0x67, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x04, + 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0a, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x44, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 
0x70, 0x6c, 0x61, 0x72, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x22, 0x5f, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x1f, 0x0a, 0x0b, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0d, 0x52, 0x0a, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x66, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x22, 0x69, 0x0a, 0x06, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x2b, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xe1, 0x02, 0x0a, + 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3f, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 
0x32, 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x65, + 0x6c, 0x70, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x68, 0x65, + 0x6c, 0x70, 0x52, 0x65, 0x66, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x72, 0x65, + 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x66, + 0x22, 0xdd, 0x01, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1b, 0x0a, 0x17, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, + 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, + 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, + 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, + 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x4d, 0x45, 0x54, 0x52, 0x49, + 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x48, 0x49, 0x53, 0x54, + 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x4d, 0x45, 0x54, 0x52, 0x49, + 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x49, 0x4e, 0x46, 0x4f, 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x53, 0x45, 
0x54, 0x10, 0x07, + 0x22, 0xf1, 0x06, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x1d, + 0x0a, 0x09, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x12, 0x21, 0x0a, + 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x73, 0x75, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x73, + 0x75, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, + 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x48, 0x01, 0x52, 0x0c, 0x7a, 0x65, 0x72, + 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x10, 0x7a, 0x65, 0x72, + 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x4f, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, + 0x6e, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x53, 0x70, 
0x61, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x12, 0x52, + 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x4f, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x53, 0x70, 0x61, 0x6e, 0x42, 0x04, 0xc8, 0xde, 0x1f, 0x00, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x12, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0e, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x0a, 0x72, + 0x65, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x2b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, + 0x61, 0x6d, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74, 0x52, 0x09, 0x72, 0x65, + 0x73, 0x65, 0x74, 0x48, 0x69, 0x6e, 0x74, 0x12, 0x1c, 
0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0c, 0x63, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x64, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x65, 0x74, + 0x48, 0x69, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x16, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, + 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x59, + 0x45, 0x53, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x48, 0x49, + 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x45, 0x54, + 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, 0x03, 0x42, 0x07, 0x0a, + 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x0c, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, + 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x42, 0x09, 0x5a, 0x07, 0x77, 0x72, 0x69, 0x74, 0x65, 0x76, 0x32, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_io_prometheus_write_v2_types_proto_rawDescOnce sync.Once + file_io_prometheus_write_v2_types_proto_rawDescData = file_io_prometheus_write_v2_types_proto_rawDesc +) + +func file_io_prometheus_write_v2_types_proto_rawDescGZIP() []byte { + file_io_prometheus_write_v2_types_proto_rawDescOnce.Do(func() { + file_io_prometheus_write_v2_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_write_v2_types_proto_rawDescData) + }) + return file_io_prometheus_write_v2_types_proto_rawDescData +} + +var file_io_prometheus_write_v2_types_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_io_prometheus_write_v2_types_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_io_prometheus_write_v2_types_proto_goTypes = []interface{}{ + (Metadata_MetricType)(0), // 0: io.prometheus.write.v2.Metadata.MetricType + (Histogram_ResetHint)(0), // 1: io.prometheus.write.v2.Histogram.ResetHint + (*Request)(nil), // 2: io.prometheus.write.v2.Request + (*TimeSeries)(nil), // 3: io.prometheus.write.v2.TimeSeries + (*Exemplar)(nil), // 4: io.prometheus.write.v2.Exemplar + (*Sample)(nil), // 5: io.prometheus.write.v2.Sample + (*Metadata)(nil), // 6: io.prometheus.write.v2.Metadata + (*Histogram)(nil), // 7: io.prometheus.write.v2.Histogram + (*BucketSpan)(nil), // 8: io.prometheus.write.v2.BucketSpan +} +var file_io_prometheus_write_v2_types_proto_depIdxs = []int32{ + 3, // 0: io.prometheus.write.v2.Request.timeseries:type_name -> io.prometheus.write.v2.TimeSeries + 5, // 1: io.prometheus.write.v2.TimeSeries.samples:type_name -> io.prometheus.write.v2.Sample + 7, // 2: io.prometheus.write.v2.TimeSeries.histograms:type_name -> io.prometheus.write.v2.Histogram + 4, // 3: io.prometheus.write.v2.TimeSeries.exemplars:type_name -> io.prometheus.write.v2.Exemplar + 6, // 4: io.prometheus.write.v2.TimeSeries.metadata:type_name -> io.prometheus.write.v2.Metadata + 0, // 5: io.prometheus.write.v2.Metadata.type:type_name -> io.prometheus.write.v2.Metadata.MetricType + 8, // 6: 
io.prometheus.write.v2.Histogram.negative_spans:type_name -> io.prometheus.write.v2.BucketSpan + 8, // 7: io.prometheus.write.v2.Histogram.positive_spans:type_name -> io.prometheus.write.v2.BucketSpan + 1, // 8: io.prometheus.write.v2.Histogram.reset_hint:type_name -> io.prometheus.write.v2.Histogram.ResetHint + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_io_prometheus_write_v2_types_proto_init() } +func file_io_prometheus_write_v2_types_proto_init() { + if File_io_prometheus_write_v2_types_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_write_v2_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TimeSeries); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sample); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_io_prometheus_write_v2_types_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*Histogram_CountInt)(nil), + (*Histogram_CountFloat)(nil), + (*Histogram_ZeroCountInt)(nil), + (*Histogram_ZeroCountFloat)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_write_v2_types_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_write_v2_types_proto_goTypes, + DependencyIndexes: file_io_prometheus_write_v2_types_proto_depIdxs, + EnumInfos: file_io_prometheus_write_v2_types_proto_enumTypes, + MessageInfos: file_io_prometheus_write_v2_types_proto_msgTypes, + }.Build() + File_io_prometheus_write_v2_types_proto = out.File + file_io_prometheus_write_v2_types_proto_rawDesc = nil + file_io_prometheus_write_v2_types_proto_goTypes = nil + file_io_prometheus_write_v2_types_proto_depIdxs = nil +} diff --git a/google/cmd/prw2gcm/io/prometheus/write/v2/types_vtproto.pb.go b/google/cmd/prw2gcm/io/prometheus/write/v2/types_vtproto.pb.go new file mode 100644 index 0000000000..1b54f53fbe --- /dev/null +++ b/google/cmd/prw2gcm/io/prometheus/write/v2/types_vtproto.pb.go @@ -0,0 +1,2546 @@ +// Code generated by protoc-gen-go-vtproto. 
DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.6.0 +// source: io/prometheus/write/v2/types.proto + +package writev2 + +import ( + binary "encoding/binary" + fmt "fmt" + protohelpers "github.com/planetscale/vtprotobuf/protohelpers" + proto "google.golang.org/protobuf/proto" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *Request) CloneVT() *Request { + if m == nil { + return (*Request)(nil) + } + r := new(Request) + if rhs := m.Symbols; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Symbols = tmpContainer + } + if rhs := m.Timeseries; rhs != nil { + tmpContainer := make([]*TimeSeries, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Timeseries = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Request) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *TimeSeries) CloneVT() *TimeSeries { + if m == nil { + return (*TimeSeries)(nil) + } + r := new(TimeSeries) + r.Metadata = m.Metadata.CloneVT() + if rhs := m.LabelsRefs; rhs != nil { + tmpContainer := make([]uint32, len(rhs)) + copy(tmpContainer, rhs) + r.LabelsRefs = tmpContainer + } + if rhs := m.Samples; rhs != nil { + tmpContainer := make([]*Sample, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Samples = tmpContainer + } + if rhs := m.Histograms; rhs != nil { + tmpContainer := make([]*Histogram, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Histograms = tmpContainer + } + if rhs := m.Exemplars; rhs != nil { + tmpContainer := make([]*Exemplar, 
len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Exemplars = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *TimeSeries) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Exemplar) CloneVT() *Exemplar { + if m == nil { + return (*Exemplar)(nil) + } + r := new(Exemplar) + r.Value = m.Value + r.Timestamp = m.Timestamp + if rhs := m.LabelsRefs; rhs != nil { + tmpContainer := make([]uint32, len(rhs)) + copy(tmpContainer, rhs) + r.LabelsRefs = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Exemplar) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Sample) CloneVT() *Sample { + if m == nil { + return (*Sample)(nil) + } + r := new(Sample) + r.Value = m.Value + r.Timestamp = m.Timestamp + r.CreatedTimestamp = m.CreatedTimestamp + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Sample) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Metadata) CloneVT() *Metadata { + if m == nil { + return (*Metadata)(nil) + } + r := new(Metadata) + r.Type = m.Type + r.HelpRef = m.HelpRef + r.UnitRef = m.UnitRef + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Metadata) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Histogram) CloneVT() *Histogram { + if m == nil { + return (*Histogram)(nil) + } + r := new(Histogram) + r.Sum = m.Sum + r.Schema = m.Schema + r.ZeroThreshold = m.ZeroThreshold + r.ResetHint = m.ResetHint + r.Timestamp = m.Timestamp + r.CreatedTimestamp = m.CreatedTimestamp + if m.Count != nil { + r.Count = 
m.Count.(interface{ CloneVT() isHistogram_Count }).CloneVT() + } + if m.ZeroCount != nil { + r.ZeroCount = m.ZeroCount.(interface{ CloneVT() isHistogram_ZeroCount }).CloneVT() + } + if rhs := m.NegativeSpans; rhs != nil { + tmpContainer := make([]*BucketSpan, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.NegativeSpans = tmpContainer + } + if rhs := m.NegativeDeltas; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.NegativeDeltas = tmpContainer + } + if rhs := m.NegativeCounts; rhs != nil { + tmpContainer := make([]float64, len(rhs)) + copy(tmpContainer, rhs) + r.NegativeCounts = tmpContainer + } + if rhs := m.PositiveSpans; rhs != nil { + tmpContainer := make([]*BucketSpan, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.PositiveSpans = tmpContainer + } + if rhs := m.PositiveDeltas; rhs != nil { + tmpContainer := make([]int64, len(rhs)) + copy(tmpContainer, rhs) + r.PositiveDeltas = tmpContainer + } + if rhs := m.PositiveCounts; rhs != nil { + tmpContainer := make([]float64, len(rhs)) + copy(tmpContainer, rhs) + r.PositiveCounts = tmpContainer + } + if rhs := m.CustomValues; rhs != nil { + tmpContainer := make([]float64, len(rhs)) + copy(tmpContainer, rhs) + r.CustomValues = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Histogram) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Histogram_CountInt) CloneVT() isHistogram_Count { + if m == nil { + return (*Histogram_CountInt)(nil) + } + r := new(Histogram_CountInt) + r.CountInt = m.CountInt + return r +} + +func (m *Histogram_CountFloat) CloneVT() isHistogram_Count { + if m == nil { + return (*Histogram_CountFloat)(nil) + } + r := new(Histogram_CountFloat) + r.CountFloat = m.CountFloat + return r +} + +func (m *Histogram_ZeroCountInt) CloneVT() isHistogram_ZeroCount { + if m == 
nil { + return (*Histogram_ZeroCountInt)(nil) + } + r := new(Histogram_ZeroCountInt) + r.ZeroCountInt = m.ZeroCountInt + return r +} + +func (m *Histogram_ZeroCountFloat) CloneVT() isHistogram_ZeroCount { + if m == nil { + return (*Histogram_ZeroCountFloat)(nil) + } + r := new(Histogram_ZeroCountFloat) + r.ZeroCountFloat = m.ZeroCountFloat + return r +} + +func (m *BucketSpan) CloneVT() *BucketSpan { + if m == nil { + return (*BucketSpan)(nil) + } + r := new(BucketSpan) + r.Offset = m.Offset + r.Length = m.Length + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *BucketSpan) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *Request) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Request) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Request) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Timeseries[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Symbols) > 0 { + for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Symbols[iNdEx]) + copy(dAtA[i:], m.Symbols[iNdEx]) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Symbols[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) MarshalVT() 
(dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Metadata != nil { + size, err := m.Metadata.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Histograms) > 0 { + for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Histograms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Samples[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.LabelsRefs) > 0 { + var pksize2 int + for _, num := range m.LabelsRefs { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] 
= uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.LabelsRefs) > 0 { + var pksize2 int + for _, num := range m.LabelsRefs { + pksize2 += protohelpers.SizeOfVarint(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.LabelsRefs { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Sample) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + 
_ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.CreatedTimestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x18 + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Metadata) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metadata) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metadata) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.UnitRef != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.UnitRef)) + i-- + dAtA[i] = 0x20 + } + if m.HelpRef != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HelpRef)) + i-- + dAtA[i] = 0x18 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Histogram) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBufferVT(dAtA []byte) (int, 
error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if vtmsg, ok := m.ZeroCount.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if vtmsg, ok := m.Count.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.CreatedTimestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CreatedTimestamp)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if len(m.CustomValues) > 0 { + for iNdEx := len(m.CustomValues) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.CustomValues[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CustomValues)*8)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x78 + } + if m.ResetHint != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ResetHint)) + i-- + dAtA[i] = 0x70 + } + if len(m.PositiveCounts) > 0 { + for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- { + f2 := math.Float64bits(float64(m.PositiveCounts[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f2)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PositiveCounts)*8)) + i-- + dAtA[i] = 0x6a + } + if len(m.PositiveDeltas) > 0 { + var pksize4 int + for _, num := range m.PositiveDeltas { + pksize4 += protohelpers.SizeOfZigzag(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num := range m.PositiveDeltas { + x5 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x5 >= 1<<7 { + dAtA[j3] = uint8(uint64(x5)&0x7f | 0x80) + j3++ + x5 >>= 7 + } + dAtA[j3] = 
uint8(x5) + j3++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize4)) + i-- + dAtA[i] = 0x62 + } + if len(m.PositiveSpans) > 0 { + for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.PositiveSpans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if len(m.NegativeCounts) > 0 { + for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- { + f6 := math.Float64bits(float64(m.NegativeCounts[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f6)) + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NegativeCounts)*8)) + i-- + dAtA[i] = 0x52 + } + if len(m.NegativeDeltas) > 0 { + var pksize8 int + for _, num := range m.NegativeDeltas { + pksize8 += protohelpers.SizeOfZigzag(uint64(num)) + } + i -= pksize8 + j7 := i + for _, num := range m.NegativeDeltas { + x9 := (uint64(num) << 1) ^ uint64((num >> 63)) + for x9 >= 1<<7 { + dAtA[j7] = uint8(uint64(x9)&0x7f | 0x80) + j7++ + x9 >>= 7 + } + dAtA[j7] = uint8(x9) + j7++ + } + i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize8)) + i-- + dAtA[i] = 0x4a + } + if len(m.NegativeSpans) > 0 { + for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.NegativeSpans[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + } + if m.ZeroThreshold != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x29 + } + if m.Schema != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31)))) + i-- + dAtA[i] = 0x20 + } + if m.Sum != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x19 + } + return len(dAtA) - i, nil +} + +func (m 
*Histogram_CountInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_CountInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CountInt)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Histogram_CountFloat) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_CountFloat) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_ZeroCountInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ZeroCountInt)) + i-- + dAtA[i] = 0x30 + return len(dAtA) - i, nil +} +func (m *Histogram_ZeroCountFloat) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram_ZeroCountFloat) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat)))) + i-- + dAtA[i] = 0x39 + return len(dAtA) - i, nil +} +func (m *BucketSpan) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BucketSpan) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *BucketSpan) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + 
if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Length != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 + } + if m.Offset != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Request) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Symbols) > 0 { + for _, s := range m.Symbols { + l = len(s) + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *TimeSeries) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Histograms) > 0 { + for _, e := range m.Histograms { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if m.Metadata != nil { + l = m.Metadata.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Exemplar) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LabelsRefs) > 0 { + l = 0 + for _, e := range m.LabelsRefs { + l += protohelpers.SizeOfVarint(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if m.Value != 0 { + n += 9 + } + if 
m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *Sample) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + if m.CreatedTimestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.CreatedTimestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *Metadata) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + if m.HelpRef != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.HelpRef)) + } + if m.UnitRef != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.UnitRef)) + } + n += len(m.unknownFields) + return n +} + +func (m *Histogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Count.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if m.Sum != 0 { + n += 9 + } + if m.Schema != 0 { + n += 1 + protohelpers.SizeOfZigzag(uint64(m.Schema)) + } + if m.ZeroThreshold != 0 { + n += 9 + } + if vtmsg, ok := m.ZeroCount.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.NegativeSpans) > 0 { + for _, e := range m.NegativeSpans { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.NegativeDeltas) > 0 { + l = 0 + for _, e := range m.NegativeDeltas { + l += protohelpers.SizeOfZigzag(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.NegativeCounts) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8 + } + if len(m.PositiveSpans) > 0 { + for _, e := range m.PositiveSpans { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.PositiveDeltas) > 0 { + l = 0 + for _, e := range m.PositiveDeltas { + l += 
protohelpers.SizeOfZigzag(uint64(e)) + } + n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l + } + if len(m.PositiveCounts) > 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8 + } + if m.ResetHint != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ResetHint)) + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + if len(m.CustomValues) > 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(len(m.CustomValues)*8)) + len(m.CustomValues)*8 + } + if m.CreatedTimestamp != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.CreatedTimestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *Histogram_CountInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.CountInt)) + return n +} +func (m *Histogram_CountFloat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Histogram_ZeroCountInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + protohelpers.SizeOfVarint(uint64(m.ZeroCountInt)) + return n +} +func (m *Histogram_ZeroCountFloat) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *BucketSpan) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + protohelpers.SizeOfZigzag(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Length)) + } + n += len(m.unknownFields) + return n +} + +func (m *Request) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timeseries = append(m.Timeseries, &TimeSeries{}) + if err := m.Timeseries[len(m.Timeseries)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } 
+ m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeSeries) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Samples = append(m.Samples, &Sample{}) + if err := m.Samples[len(m.Samples)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Histograms = append(m.Histograms, &Histogram{}) + if err := m.Histograms[len(m.Histograms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &Metadata{} + } + if err := m.Metadata.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.LabelsRefs) == 0 { + m.LabelsRefs = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LabelsRefs = append(m.LabelsRefs, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType) + } + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sample) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sample: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sample: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 
{ + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metadata) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Metadata_MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType) + } + m.HelpRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HelpRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType) + } + m.UnitRef = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.UnitRef |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &Histogram_CountInt{CountInt: v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field CountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Count = &Histogram_CountFloat{CountFloat: float64(math.Float64frombits(v))} + case 3: + if wireType != 1 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Schema = v + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountInt", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ZeroCount = &Histogram_ZeroCountInt{ZeroCountInt: v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCountFloat", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroCount = &Histogram_ZeroCountFloat{ZeroCountFloat: float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NegativeSpans = append(m.NegativeSpans, &BucketSpan{}) + if err := m.NegativeSpans[len(m.NegativeSpans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.NegativeDeltas) == 0 { + m.NegativeDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.NegativeDeltas = append(m.NegativeDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeDeltas", wireType) + } + case 10: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.NegativeCounts) == 0 { + m.NegativeCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.NegativeCounts = append(m.NegativeCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field NegativeCounts", wireType) + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PositiveSpans = append(m.PositiveSpans, &BucketSpan{}) + if err := m.PositiveSpans[len(m.PositiveSpans)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.PositiveDeltas) == 0 { + m.PositiveDeltas = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = (v >> 1) ^ uint64((int64(v&1)<<63)>>63) + m.PositiveDeltas = append(m.PositiveDeltas, int64(v)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveDeltas", wireType) + } + case 13: + if wireType 
== 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.PositiveCounts) == 0 { + m.PositiveCounts = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.PositiveCounts = append(m.PositiveCounts, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field PositiveCounts", wireType) + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetHint", wireType) + } + m.ResetHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResetHint |= Histogram_ResetHint(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp 
|= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.CustomValues) == 0 { + m.CustomValues = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.CustomValues = append(m.CustomValues, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field CustomValues", wireType) + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType) + } + m.CreatedTimestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedTimestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BucketSpan) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BucketSpan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BucketSpan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/google/cmd/prw2gcm/main.go b/google/cmd/prw2gcm/main.go new file mode 100644 index 0000000000..d102a98c1c --- /dev/null +++ b/google/cmd/prw2gcm/main.go @@ -0,0 +1,146 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "flag" + "fmt" + "log/slog" + "net/http" + "net/http/pprof" + "os" + "strings" + "syscall" + "time" + + "cloud.google.com/go/compute/metadata" + "github.com/oklog/run" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/version" + "github.com/prometheus/prometheus/google/export/setup" +) + +const ( + // NOTE: Similar to other Prometheus HTTP APIs, we assume it will be under the + // https://monitoring.googleapis.com/v1/projects/PROJECT_ID/location/global/prometheus/api/v1 path. 
+ // + // See: https://cloud.google.com/stackdriver/docs/managed-prometheus/query-api-ui#api-prometheus + pathPrefix = "/v1/projects/" + pathSuffix = "/location/global/prometheus/api/v1/write" +) + +var ( + listenAddress = flag.String("listen-address", ":19091", + "Address on which to expose metrics and the Remote Write 2.x handler.") + logLevelFlag = flag.String("log.level", "info", "Logging level; available values: 'debug', 'info', 'warn', 'error'.") + forwardCredentials = flag.Bool("gcm.forward-credentials", false, + "Enables mode where proxy will expect an HTTP 'Authorization' header with the Bearer token in the incoming Remote Write 2.x requests. "+ + "This token will be then forwarded to the gRPC GCM unary call. This mode allows running this proxy as a Prometheus sidecar, using Prometheus remote_write"+ + " auth setup. IMPORTANT: For this mode proxy, in the insecure environments, proxy should be served behind TLS.") + credentialsFile = flag.String("gcm.credentials-file", "", + "File with JSON-encoded credentials (service account or refresh token). Can be left empty if default credentials have sufficient permission.") + gcmEndpoint = flag.String("gcm.endpoint", "monitoring.googleapis.com:443", + "GCM API endpoint to send metric data to.") + gcmUserAgentMode = flag.String("gcm.user-agent-mode", setup.UAModeUnspecified, fmt.Sprintf("Mode for user agent used for requests against the GCM API. Valid values are %q, %q, %q, %q or %q.", setup.UAModeGKE, setup.UAModeKubectl, setup.UAModeAVMW, setup.UAModeABM, setup.UAModeUnspecified)) +) + +// TODO: Add TLS options, recommended if this proxy should forward auth headers. 
+func main() { + flag.Parse() + + var logLevel slog.Level + if err := logLevel.UnmarshalText([]byte(*logLevelFlag)); err != nil { + println("failed to parse -log.level flag", err) + os.Exit(1) + } + logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel})) + + metrics := prometheus.NewRegistry() + metrics.MustRegister( + collectors.NewGoCollector(), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + ) + + var g run.Group + g.Add(run.SignalHandler(context.Background(), os.Interrupt, syscall.SIGTERM)) + { + env := setup.UAEnvUnspecified + // Default target fields if we can detect them in GCP. + if metadata.OnGCE() { + env = setup.UAEnvGCE + cluster, _ := metadata.InstanceAttributeValue("cluster-name") + if cluster != "" { + env = setup.UAEnvGKE + } + } + + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor(metrics, promhttp.HandlerOpts{Registry: metrics, EnableOpenMetrics: true})) + mux.HandleFunc("/-/healthy", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "prw2gcm is Healthy.\n") + }) + mux.HandleFunc("/-/ready", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "prw2gcm is Ready.\n") + }) + mux.HandleFunc("/debug/pprof/", pprof.Index) + mux.HandleFunc("/debug/pprof/profile", pprof.Profile) + mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + mux.HandleFunc("/debug/pprof/trace", pprof.Trace) + + // Identity User Agent for all gRPC requests. 
+ ua := strings.TrimSpace(fmt.Sprintf("%s/%s %s (env:%s;mode:%s)", + "prw2gcm", version.Version, "prw2-gcm", env, *gcmUserAgentMode)) + p, err := newProxy( + context.Background(), + proxyOpts{ + logger: logger, + credentialsFile: *credentialsFile, + forwardCredentials: *forwardCredentials, + endpoint: *gcmEndpoint, + defaultUserAgent: ua, + }, + ) + if err != nil { + logger.Error("failed to create proxy", "err", err) + os.Exit(1) + } + + mux.Handle(p.Handler(metrics)) + server := &http.Server{ + Handler: mux, + Addr: *listenAddress, + } + g.Add(func() error { + logger.Info("starting web server for metrics", "listen", *listenAddress) + return server.ListenAndServe() + }, func(err error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + _ = server.Shutdown(ctx) + }) + } + + logger.Info("starting prw2gcm...") + if err := g.Run(); err != nil { + logger.Error("prw2gcm failed", "err", err) + os.Exit(1) + } + logger.Info("prw2gcm finished") +} diff --git a/google/cmd/prw2gcm/middleware.go b/google/cmd/prw2gcm/middleware.go new file mode 100644 index 0000000000..34fe68d667 --- /dev/null +++ b/google/cmd/prw2gcm/middleware.go @@ -0,0 +1,132 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type projectIDCtxKeyType struct{} + +var ( + projectIDCtxKey projectIDCtxKeyType + projectIDRe = regexp.MustCompile("^[a-z-0-9]+$") +) + +func detectPOSTMethodProjectID(handler http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + http.NotFound(w, r) + return + } + + // Retrieve and sanitize the PROJECT_ID from the URL path like with + // the '/v1/projects/PROJECT_ID/location/global/prometheus/api/v1/write' format. + // This follows similar format to Google Prometheus HTTP API. + urlPath := r.URL.Path + if !strings.HasPrefix(urlPath, pathPrefix) { + http.NotFound(w, r) + return + } + urlPath = strings.TrimPrefix(urlPath, pathPrefix) + + if !strings.HasSuffix(urlPath, pathSuffix) { + http.NotFound(w, r) + return + } + + projectID := strings.TrimSuffix(urlPath, pathSuffix) + if !projectIDRe.MatchString(projectID) { + http.Error(w, fmt.Sprintf("PROJECT_ID detected from the '%sPROJECT_ID%s' path has unsupported value, got %q, expected value that matches %q; URL path %q", pathPrefix, pathSuffix, projectID, projectIDRe.String(), r.URL.Path), http.StatusNotFound) + return + } + ctx := context.WithValue(r.Context(), projectIDCtxKey, projectID) + handler.ServeHTTP(w, r.WithContext(ctx)) + } +} + +func getProjectID(ctx context.Context) string { + ret, ok := ctx.Value(projectIDCtxKey).(string) + if !ok { + return "unknown" + } + return ret +} + +func instrument(reg prometheus.Registerer, handlerName string, handler http.Handler) http.HandlerFunc { + reg = prometheus.WrapRegistererWith(prometheus.Labels{"handler": handlerName}, reg) + + requestDuration := promauto.With(reg).NewHistogramVec( + prometheus.HistogramOpts{ + Name: 
"http_request_duration_seconds", + Help: "Tracks the latencies for HTTP requests.", + + NativeHistogramBucketFactor: 1.1, + }, + []string{"method", "code"}, + ) + requestSize := promauto.With(reg).NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_request_size_bytes", + Help: "Tracks the size of HTTP requests.", + + // Custom buckets, so key metric is visible in the text format (for testing and local debugging). + Buckets: []float64{0, 200, 1024, 2048, 10240}, + + NativeHistogramBucketFactor: 1.1, + }, + []string{"method", "code"}, + ) + requestsTotal := promauto.With(reg).NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "Tracks the number of HTTP requests.", + }, []string{"method", "code"}, + ) + responseSize := promauto.With(reg).NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_response_size_bytes", + Help: "Tracks the size of HTTP responses.", + + NativeHistogramBucketFactor: 1.1, + }, + []string{"method", "code"}, + ) + + base := promhttp.InstrumentHandlerRequestSize( + requestSize, + promhttp.InstrumentHandlerCounter( + requestsTotal, + promhttp.InstrumentHandlerResponseSize( + responseSize, + promhttp.InstrumentHandlerDuration( + requestDuration, + http.HandlerFunc(func(writer http.ResponseWriter, r *http.Request) { + handler.ServeHTTP(writer, r) + }), + ), + ), + ), + ) + return base.ServeHTTP +} diff --git a/google/cmd/prw2gcm/middleware_test.go b/google/cmd/prw2gcm/middleware_test.go new file mode 100644 index 0000000000..e623c153d7 --- /dev/null +++ b/google/cmd/prw2gcm/middleware_test.go @@ -0,0 +1,54 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//	https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestDetectPOSTMethodProjectID spins up an httptest server with the
// detectPOSTMethodProjectID middleware in front of a capture handler and
// checks three cases: a path not matching the '/v1/projects/...' format
// (404), a project ID containing non-ASCII characters (404), and a valid
// project ID, which must reach the inner handler and be retrievable via
// getProjectID from the request context.
func TestDetectPOSTMethodProjectID(t *testing.T) {
	// got records the project ID seen by the innermost handler.
	var got string
	mux := http.NewServeMux()
	mux.Handle(pathPrefix, detectPOSTMethodProjectID(func(_ http.ResponseWriter, r *http.Request) {
		got = getProjectID(r.Context())
	}))
	srv := httptest.NewServer(mux)
	t.Cleanup(srv.Close)

	t.Run("wrong path", func(t *testing.T) {
		// 'NOT' instead of 'projects' — middleware must 404 before the handler runs.
		u := fmt.Sprintf("%s/v1/NOT/my-project1/location/global/prometheus/api/v1/write", srv.URL)
		r, err := srv.Client().Post(u, "application/x-www-form-urlencoded", http.NoBody)
		require.NoError(t, err)
		require.Equal(t, http.StatusNotFound, r.StatusCode)
	})
	t.Run("wrong project id", func(t *testing.T) {
		// '§' is outside the allowed project-ID character set.
		u := fmt.Sprintf("%s/v1/projects/gs§fs§f1/location/global/prometheus/api/v1/write", srv.URL)
		r, err := srv.Client().Post(u, "application/x-www-form-urlencoded", http.NoBody)
		require.NoError(t, err)
		require.Equal(t, http.StatusNotFound, r.StatusCode)
	})
	t.Run("project id", func(t *testing.T) {
		u := fmt.Sprintf("%s/v1/projects/my-project1/location/global/prometheus/api/v1/write", srv.URL)
		r, err := srv.Client().Post(u, "application/x-www-form-urlencoded", http.NoBody)
		require.NoError(t, err)
		require.Equal(t, http.StatusOK, r.StatusCode)
		require.Equal(t, "my-project1", got)
	})
}

// ---- residue from collapsed patch (next hunk boundary, preserved verbatim) ----
// diff --git a/google/cmd/prw2gcm/proxy.go b/google/cmd/prw2gcm/proxy.go
// new file mode 100644
// index 0000000000..e076960310
// --- /dev/null
// +++ b/google/cmd/prw2gcm/proxy.go
// @@ -0,0 +1,300 @@
+// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "errors" + "fmt" + "io" + "log/slog" + "net/http" + "path" + "strings" + "sync" + + "cloud.google.com/go/auth" + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + monitoring_pb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/prometheus/client_golang/exp/api/remote" + "github.com/prometheus/client_golang/prometheus" + writev2 "github.com/prometheus/prometheus/google/cmd/prw2gcm/io/prometheus/write/v2" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +type proxy struct { + opts proxyOpts + client *monitoring.MetricClient +} + +type proxyOpts struct { + logger *slog.Logger + endpoint string + defaultUserAgent string + + // credentialsFile is a path with the Google service account JSON file to use + // for GCM gRPC calls. If no credentials are set, proxy will try to deduce default credentials. + credentialsFile string + + // forwardCredentials enables mode where proxy will + // expect an HTTP 'Authorization' header with the Bearer token in the incoming + // Remote Write 2.x requests. This token will be then forwarded to the gRPC GCM unary call. + // This mode allows running this proxy as a Prometheus sidecar, using Prometheus remote_write + // auth setup. This flow is not uncommon e.g. 
see https://github.com/grpc-ecosystem/grpc-gateway/blob/2fba1914fcc12696707a5dfa91dbf92cdb7af555/runtime/context.go#L136 + // IMPORTANT: For this mode proxy, in the insecure environments, proxy + // should be served behind TLS. + forwardCredentials bool +} + +var ( + _ auth.TokenProvider = &perRequestAuthProvider{} +) + +type requestAuthTokenCtxKeyType struct{} + +var ( + requestAuthTokenCtxKey requestAuthTokenCtxKeyType +) + +// perRequestAuthProvider allows providing access auth token on every gRPC call, +// if the given context has a token under requestAuthTokenCtxKey. +// If no token is found in the context, error is returned, causing gRPC call to fail. +type perRequestAuthProvider struct{} + +func (p *perRequestAuthProvider) Token(ctx context.Context) (*auth.Token, error) { + token, ok := ctx.Value(requestAuthTokenCtxKey).(string) + if !ok { + // Should never happen as we ensure this value exists on proxy.Store method. + return nil, errors.New("token not found in the context") + } + return &auth.Token{Value: token}, nil +} + +func newProxy(ctx context.Context, opts proxyOpts) (_ *proxy, err error) { + p := &proxy{ + opts: opts, + } + + // Setup GCM client. 
+ clientOpts := []option.ClientOption{ + option.WithGRPCDialOption(grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor)), + } + if opts.defaultUserAgent != "" { + clientOpts = append(clientOpts, option.WithUserAgent(opts.defaultUserAgent)) + } + if opts.endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(opts.endpoint)) + } + if opts.credentialsFile != "" { + if opts.forwardCredentials { + return nil, errors.New("both credentialsFile and forwardCredentials options set on proxy; choose one") + } + slog.Info("using JSON file credentials provided") + clientOpts = append(clientOpts, option.WithCredentialsFile(opts.credentialsFile)) + } + if opts.forwardCredentials { + slog.Info("setting up flow for using the Bearer Authorization token from the incoming HTTP request") + clientOpts = append(clientOpts, option.WithAuthCredentials( + auth.NewCredentials(&auth.CredentialsOptions{TokenProvider: &perRequestAuthProvider{}}), + )) + } + p.client, err = monitoring.NewMetricClient(ctx, clientOpts...) + if err != nil { + return nil, fmt.Errorf("create GCM client: %w", err) + } + return p, nil +} + +func (p *proxy) Handler(reg prometheus.Registerer) (pattern string, handler http.Handler) { + return pathPrefix, detectPOSTMethodProjectID( + instrument(reg, path.Join(pathPrefix, "PROJECT_ID", pathSuffix), + remote.NewWriteHandler( + p, + remote.MessageTypes{remote.WriteV2MessageType}, + remote.WithWriteHandlerLogger(p.opts.logger), + ), + ), + ) +} + +// Copied from https://github.com/grpc-ecosystem/grpc-gateway/blob/2fba1914fcc12696707a5dfa91dbf92cdb7af555/runtime/context.go#L123 +func isValidGRPCMetadataTextValue(textValue string) bool { + // Must be a valid gRPC "ASCII-Value" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. 
+ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. + for _, ch := range bytes { + if ch < 0x20 || ch > 0x7E { + return false + } + } + return true +} + +func (p *proxy) injectAuthFromRequest(req *http.Request) (context.Context, error) { + // Forward potential auth token from the client. + authToken := req.Header.Get("Authorization") + if authToken == "" { + return nil, newHTTPError(http.StatusUnauthorized, "no Authorization header found") + } + // We expect bearer token, find the token itself. + parts := strings.Split(authToken, " ") + if len(parts) != 2 { + return nil, newHTTPErrorf(http.StatusUnauthorized, "unexpected format of the Authorization header value, found %v part for ' ' delimiter; expected 'Bearer ' format", len(parts)) + } + if !isValidGRPCMetadataTextValue(parts[1]) { + return nil, newHTTPError(http.StatusBadRequest, "value of HTTP Authorization header contains non-ASCII value (not valid as gRPC metadata)") + } + return context.WithValue(req.Context(), requestAuthTokenCtxKey, parts[1]), nil +} + +// Store is what's invoked from client_golang's remote_write handling library when 2.x Remote Write is sent. +// It parses PRW 2.x and sends GCM CreateTimesSeries (max) 200 series batches for every set of samples. +func (p *proxy) Store(req *http.Request, _ remote.WriteMessageType) (_ *remote.WriteResponse, retErr error) { + w := remote.NewWriteResponse() + + ctx := req.Context() + if p.opts.forwardCredentials { + var err error + ctx, err = p.injectAuthFromRequest(req) + if err != nil { + w.SetStatusCode(httpCodeFromErrorOr500(err)) + return w, err + } + } + projectID := getProjectID(req.Context()) + if projectID == "unknown" { + // Programmatic error, should not happen if the detectPOSTMethodProjectID middleware is set. 
+ return nil, newHTTPError(http.StatusInternalServerError, "no project id found") + } + + serializedRequest, err := io.ReadAll(req.Body) + if err != nil { + w.SetStatusCode(http.StatusBadRequest) + return w, err + } + + r := &writev2.Request{} + if err := r.UnmarshalVT(serializedRequest); err != nil { + w.SetStatusCode(http.StatusInternalServerError) + return w, fmt.Errorf("decoding v2 request %w", err) + } + + if len(r.Timeseries) == 0 { + w.SetStatusCode(http.StatusBadRequest) + return w, errors.New("no series in v2 request") + } + + qm := startQueueManager(ctx, func(ctx context.Context, series []*monitoring_pb.TimeSeries) error { + if err := p.client.CreateTimeSeries( + ctx, + &monitoring_pb.CreateTimeSeriesRequest{Name: fmt.Sprintf("projects/%s", projectID), TimeSeries: series}, + ); err != nil { + return newHTTPErrorFromGRPC(fmt.Errorf("gcm batch send failed for %v series; no more retries; %w", len(series), err)) + } + return nil + }) + defer func() { + qm.CloseAndWait() + retErr = httpErrJoin(retErr, qm.Err()) + if retErr != nil { + w.SetStatusCode(httpCodeFromErrorOr500(retErr)) + } + }() + + stats, err := Convert(ctx, r, qm) + w.Samples += stats.Samples + w.Histograms += stats.Histograms + w.Exemplars += stats.Exemplars + return w, err +} + +type gcmCreateTimeSeriesFunc func(ctx context.Context, series []*monitoring_pb.TimeSeries) error + +type queueManager struct { + ctx context.Context + queueCh chan *monitoring_pb.TimeSeries + gcmCreateTSFn gcmCreateTimeSeriesFunc + + wg sync.WaitGroup + err error +} + +func startQueueManager(ctx context.Context, gcmCreateTSFn gcmCreateTimeSeriesFunc) *queueManager { + q := &queueManager{ + ctx: ctx, + queueCh: make(chan *monitoring_pb.TimeSeries, 10), + gcmCreateTSFn: gcmCreateTSFn, + } + q.wg.Add(1) + + go q.run() + return q +} + +const maxBatchSize = 100 + +func (q *queueManager) run() { + defer q.wg.Done() + + batch := make([]*monitoring_pb.TimeSeries, 0, maxBatchSize) + for { + flushBatch := false + + // Ignore 
checking context, we expect close method to tell us when to stop. + ts, ok := <-q.queueCh + if ts == nil || !ok { + // Sentinel value for flushing or closing channel. + flushBatch = len(batch) > 0 + } else { + // Adding to batch. + batch = append(batch, ts) + // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3 200 objects per request max. + flushBatch = len(batch) >= maxBatchSize + } + + if flushBatch { + q.err = httpErrJoin(q.err, q.gcmCreateTSFn(q.ctx, batch)) + batch = batch[:0] + } + + if !ok { + return + } + } +} + +func (q *queueManager) Enqueue(ts *monitoring_pb.TimeSeries) { + if ts == nil { + return + } + q.queueCh <- ts +} + +func (q *queueManager) Flush() { + q.queueCh <- nil +} + +func (q *queueManager) CloseAndWait() { + q.Flush() + close(q.queueCh) + q.wg.Wait() +} + +func (q *queueManager) Err() error { + return q.err +} diff --git a/google/cmd/prw2gcm/proxy_test.go b/google/cmd/prw2gcm/proxy_test.go new file mode 100644 index 0000000000..c1d5882b91 --- /dev/null +++ b/google/cmd/prw2gcm/proxy_test.go @@ -0,0 +1,282 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build gcme2e +// +build gcme2e + +package main + +import ( + "context" + "fmt" + "log/slog" + "math" + "math/rand" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + gcm "cloud.google.com/go/monitoring/apiv3/v2" + "github.com/oklog/ulid" + "github.com/prometheus/client_golang/exp/api/remote" + "github.com/prometheus/client_golang/prometheus" + writev2 "github.com/prometheus/prometheus/google/cmd/prw2gcm/io/prometheus/write/v2" + "github.com/prometheus/prometheus/model/timestamp" + "github.com/prometheus/prometheus/model/value" + "github.com/stretchr/testify/require" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + apihttp "google.golang.org/api/transport/http" +) + +// gcmServiceAccountOrFail gets the Google SA JSON content from GCM_SECRET +// environment variable or fails. +func gcmServiceAccountOrFail(t testing.TB) []byte { + saJSON := []byte(os.Getenv("GCM_SECRET")) + if len(saJSON) == 0 { + t.Fatal("gcmServiceAccountOrFail: no GCM_SECRET env var provided, can't run the test") + } + return saJSON +} + +// NewRoundTripper creates a round tripper that adds Google Cloud Monitoring authorization to calls +// using either a credentials file or the default credentials. +// Literal copy of https://github.com/prometheus/prometheus/blob/main/storage/remote/googleiam/googleiam.go +// TODO(bwplotka): Import directly once this fork will contain this change. +func NewRoundTripper(gcmJSON []byte, next http.RoundTripper) (http.RoundTripper, error) { + if next == nil { + next = http.DefaultTransport + } + + const scopes = "https://www.googleapis.com/auth/monitoring.write" + ctx := context.Background() + opts := []option.ClientOption{ + option.WithScopes(scopes), + option.WithCredentialsJSON(gcmJSON), + } + return apihttp.NewTransport(ctx, next, opts...) 
+} + +func generateTestID(t *testing.T) string { + return fmt.Sprintf("%v: %v", t.Name(), ulid.MustNew(ulid.Now(), rand.New(rand.NewSource(time.Now().UnixNano()))).String()) +} + +// TestProxyGCM tests basic proxy handling, some of it against the production GCM service. +// GCM_SECRET environment variable is required, with the GCP secret with the +// https://www.googleapis.com/auth/monitoring.write scope. +func TestProxyGCM(t *testing.T) { + gcmSA := gcmServiceAccountOrFail(t) + + slog.SetLogLoggerLevel(slog.LevelDebug) + // Setup server side, explicitly without credentials -- we test Prometheus auth setup here. + p, err := newProxy(t.Context(), proxyOpts{ + logger: slog.Default(), + forwardCredentials: true, + defaultUserAgent: "prw2gcm/test", + }) + require.NoError(t, err) + + mux := http.NewServeMux() + mux.Handle(p.Handler(prometheus.NewRegistry())) + srv := httptest.NewServer(mux) + t.Cleanup(srv.Close) + + // Client side, enhanced with the Prometheus GCM remote Write round tripper. + client := srv.Client() + rt, err := NewRoundTripper(gcmSA, client.Transport) + require.NoError(t, err) + client.Transport = rt + + ctx := t.Context() + // We infer project id from credentials too. + creds, err := google.CredentialsFromJSON(ctx, gcmSA, gcm.DefaultAuthScopes()...) + require.NoError(t, err) + + api, err := remote.NewAPI( + // NOTE: NewAPI adds /api/v1/write path by default. 
+ fmt.Sprintf("%s/v1/projects/%s/location/global/prometheus", srv.URL, creds.ProjectID), + remote.WithAPIHTTPClient(client), + ) + require.NoError(t, err) + + t.Run("v1 should be rejected", func(t *testing.T) { + stats, err := api.Write(ctx, remote.WriteV1MessageType, &writev2.Request{}) + fmt.Println(err) + require.Error(t, err) + require.True(t, stats.NoDataWritten()) + }) + t.Run("wrong path", func(t *testing.T) { + wrongPathAPI, err := remote.NewAPI(fmt.Sprintf("%s/v1/wrong", srv.URL)) + require.NoError(t, err) + stats, err := wrongPathAPI.Write(ctx, remote.WriteV1MessageType, &writev2.Request{}) + fmt.Println(err) + require.Error(t, err) + require.True(t, stats.NoDataWritten()) + }) + t.Run("empty v2", func(t *testing.T) { + stats, err := api.Write(ctx, remote.WriteV2MessageType, &writev2.Request{}) + require.Error(t, err) + require.True(t, stats.NoDataWritten()) + }) + // NOTE: Detailed, sample specific tests are not needed here, given the + // detailed google/internal/promqle2etest tests. + t.Run("gauge and counter", func(t *testing.T) { + testID := generateTestID(t) + ts := time.Now().Add(-1 * time.Hour) + + s := writev2.NewSymbolTable() + r := &writev2.Request{ + Timeseries: []*writev2.TimeSeries{ + { + LabelsRefs: []uint32{ + s.Symbolize("__name__"), s.Symbolize("proxy_test_counter_total"), + // Some target labels. + s.Symbolize("project_id"), s.Symbolize(creds.ProjectID), + s.Symbolize("location"), s.Symbolize("europe-west3-a"), + s.Symbolize("cluster"), s.Symbolize("prom-github-action"), + s.Symbolize("job"), s.Symbolize("TestProxyGCM"), + s.Symbolize("instance"), s.Symbolize(testID), + // Other. 
+ s.Symbolize("repo"), s.Symbolize("github.com/GoogleCloudPlatform/prometheus"), + }, + Metadata: &writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: s.Symbolize("Test counter used by prw2gcm test"), + UnitRef: s.Symbolize("seconds"), + }, + Samples: []*writev2.Sample{ + { + CreatedTimestamp: timestamp.FromTime(ts), + Timestamp: timestamp.FromTime(ts.Add(10 * time.Minute)), + Value: 10, + }, + { + CreatedTimestamp: timestamp.FromTime(ts), + Timestamp: timestamp.FromTime(ts.Add(11 * time.Minute)), + Value: 100, + }, + { + CreatedTimestamp: timestamp.FromTime(ts), + Timestamp: timestamp.FromTime(ts.Add(12 * time.Minute)), + Value: math.Float64frombits(value.StaleNaN), // Should be skipped. + }, + { + CreatedTimestamp: timestamp.FromTime(ts.Add(13 * time.Minute)), + Timestamp: timestamp.FromTime(ts.Add(14 * time.Minute)), + Value: 50, + }, + }, + }, + { + LabelsRefs: []uint32{ + s.Symbolize("__name__"), s.Symbolize("proxy_test_gauge"), + // Some target labels. + s.Symbolize("project_id"), s.Symbolize(creds.ProjectID), + s.Symbolize("location"), s.Symbolize("europe-west3-a"), + s.Symbolize("cluster"), s.Symbolize("prom-github-action"), + s.Symbolize("job"), s.Symbolize("TestProxyGCM"), + s.Symbolize("instance"), s.Symbolize(testID), + // Other. + s.Symbolize("repo"), s.Symbolize("github.com/GoogleCloudPlatform/prometheus"), + }, + Metadata: &writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_GAUGE, + HelpRef: s.Symbolize("Test gauge used by prw2gcm test"), + UnitRef: s.Symbolize("seconds"), + }, + Samples: []*writev2.Sample{ + { + Timestamp: timestamp.FromTime(ts.Add(10 * time.Minute)), + Value: 10, + }, + { + Timestamp: timestamp.FromTime(ts.Add(11 * time.Minute)), + Value: 5, + }, + { + Timestamp: timestamp.FromTime(ts.Add(12 * time.Minute)), + Value: math.Float64frombits(value.StaleNaN), // Should be skipped. 
+ }, + { + CreatedTimestamp: timestamp.FromTime(ts.Add(13 * time.Minute)), + Timestamp: timestamp.FromTime(ts.Add(13 * time.Minute)), + Value: 1124.155, + }, + }, + }, + }, + } + r.Symbols = s.Symbols() + stats, err := api.Write(ctx, remote.WriteV2MessageType, r) + require.NoError(t, err) + require.Equal(t, 6, stats.Samples) + }) + t.Run("no ct", func(t *testing.T) { + testID := generateTestID(t) + ts := time.Now().Add(-1 * time.Hour) + + s := writev2.NewSymbolTable() + r := &writev2.Request{ + Timeseries: []*writev2.TimeSeries{ + { + LabelsRefs: []uint32{ + s.Symbolize("__name__"), s.Symbolize("proxy_test_counter_total"), + // Some target labels. + s.Symbolize("project_id"), s.Symbolize(creds.ProjectID), + s.Symbolize("location"), s.Symbolize("europe-west3-a"), + s.Symbolize("cluster"), s.Symbolize("prom-github-action"), + s.Symbolize("job"), s.Symbolize("TestProxyGCM"), + s.Symbolize("instance"), s.Symbolize(testID), + // Other. + s.Symbolize("repo"), s.Symbolize("github.com/GoogleCloudPlatform/prometheus"), + }, + Metadata: &writev2.Metadata{ + Type: writev2.Metadata_METRIC_TYPE_COUNTER, + HelpRef: s.Symbolize("Test counter used by prw2gcm test"), + UnitRef: s.Symbolize("seconds"), + }, + Samples: []*writev2.Sample{ + { + CreatedTimestamp: timestamp.FromTime(ts), + Timestamp: timestamp.FromTime(ts.Add(10 * time.Minute)), + Value: 10, + }, + { + // No CT! + Timestamp: timestamp.FromTime(ts.Add(11 * time.Minute)), + Value: 100, + }, + { + CreatedTimestamp: timestamp.FromTime(ts), + Timestamp: timestamp.FromTime(ts.Add(12 * time.Minute)), + Value: math.Float64frombits(value.StaleNaN), // Should be skipped. + }, + { + CreatedTimestamp: timestamp.FromTime(ts.Add(13 * time.Minute)), + Timestamp: timestamp.FromTime(ts.Add(14 * time.Minute)), + Value: 50, + }, + }, + }, + }, + } + r.Symbols = s.Symbols() + stats, err := api.Write(ctx, remote.WriteV2MessageType, r) + require.Error(t, err) + // Despite error, we should send 2 samples. 
+ require.Equal(t, 2, stats.Samples) + }) +} diff --git a/google/internal/promqle2etest/backend_prom_gcm.go b/google/internal/promqle2etest/backend_prom_gcm.go index 33009c2a44..af283a85c3 100644 --- a/google/internal/promqle2etest/backend_prom_gcm.go +++ b/google/internal/promqle2etest/backend_prom_gcm.go @@ -19,6 +19,7 @@ import ( "os" "path/filepath" "strconv" + "strings" "testing" gcm "cloud.google.com/go/monitoring/apiv3/v2" @@ -27,30 +28,29 @@ import ( "github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/compliance/promqle2e" + "github.com/stretchr/testify/require" "golang.org/x/oauth2" "golang.org/x/oauth2/google" ) -var _ promqle2e.Backend = PrometheusForkGCMBackend{} - -// PrometheusForkGCMBackend represents a Prometheus GMP fork scraping -// metrics and pushing to GCM API for consumption. -// This generally follows https://cloud.google.com/stackdriver/docs/managed-prometheus/setup-unmanaged. -type PrometheusForkGCMBackend struct { - Image string - Name string - GCMSA []byte -} - -func (p PrometheusForkGCMBackend) Ref() string { - return p.Name -} +var ( + _ promqle2e.Backend = PrometheusForkGCMBackend{} + _ promqle2e.Backend = PrometheusGCMBackend{} +) // newPrometheus creates a new Prometheus runnable. 
-func newPrometheus(env e2e.Environment, name string, image string, scrapeTargetAddress string, flagOverride func(dir string) map[string]string) *e2emon.Prometheus { +func newPrometheus( + env e2e.Environment, name string, image string, scrapeTargetAddress string, + extraConfig func(dir string) string, + flagOverride func(dir string) map[string]string, +) *e2emon.Prometheus { ports := map[string]int{"http": 9090} f := env.Runnable(name).WithPorts(ports).Future() + + if extraConfig == nil { + extraConfig = func(dir string) string { return "" } + } config := fmt.Sprintf(` global: external_labels: @@ -64,7 +64,7 @@ scrape_configs: metric_relabel_configs: - regex: instance action: labeldrop -`, name, scrapeTargetAddress) +%s`, name, scrapeTargetAddress, extraConfig(f.Dir())) if err := os.WriteFile(filepath.Join(f.Dir(), "prometheus.yml"), []byte(config), 0600); err != nil { return &e2emon.Prometheus{Runnable: e2e.NewFailedRunnable(name, fmt.Errorf("create prometheus config failed: %w", err))} } @@ -94,22 +94,31 @@ scrape_configs: Readiness: e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), User: strconv.Itoa(os.Getuid()), }), "http") - return &e2emon.Prometheus{ Runnable: p, Instrumented: p, } } +// PrometheusForkGCMBackend represents a Prometheus GMP fork scraping +// metrics and pushing to GCM API for consumption. +// This generally follows https://cloud.google.com/stackdriver/docs/managed-prometheus/setup-unmanaged. +type PrometheusForkGCMBackend struct { + Image string + Name string + GCMSA []byte +} + +func (p PrometheusForkGCMBackend) Ref() string { + return p.Name +} + func (p PrometheusForkGCMBackend) StartAndWaitReady(t testing.TB, env e2e.Environment) promqle2e.RunningBackend { t.Helper() ctx := t.Context() - creds, err := google.CredentialsFromJSON(ctx, p.GCMSA, gcm.DefaultAuthScopes()...) - if err != nil { - t.Fatalf("create credentials from JSON: %s", err) - } + require.NoError(t, err, "create credentials from JSON") // Fake, does not matter. 
cluster := "pe-github-action" @@ -119,27 +128,107 @@ func (p PrometheusForkGCMBackend) StartAndWaitReady(t testing.TB, env e2e.Enviro Address: fmt.Sprintf("https://monitoring.googleapis.com/v1/projects/%s/location/global/prometheus", creds.ProjectID), Client: oauth2.NewClient(ctx, creds.TokenSource), }) - if err != nil { - t.Fatalf("create Prometheus client: %s", err) - } + require.NoError(t, err, "create Prometheus client failed") replayer := promqle2e.StartIngestByScrapeReplayer(t, env) - prom := newPrometheus(env, p.Name, p.Image, replayer.Endpoint(env), func(dir string) map[string]string { - if err := os.WriteFile(filepath.Join(dir, "gcm-sa.json"), p.GCMSA, 0600); err != nil { - t.Fatalf("write JSON creds: %s", err) - } - - // Flags as per https://cloud.google.com/stackdriver/docs/managed-prometheus/setup-unmanaged#gmp-binary. - return map[string]string{"--export.label.project-id": creds.ProjectID, - "--export.label.location": location, - "--export.label.cluster": cluster, - "--export.credentials-file": filepath.Join(dir, "gcm-sa.json"), - } + prom := newPrometheus( + env, p.Name, p.Image, replayer.Endpoint(env), + nil, + func(dir string) map[string]string { + // Flags as per https://cloud.google.com/stackdriver/docs/managed-prometheus/setup-unmanaged#gmp-binary. 
+ return map[string]string{"--export.label.project-id": creds.ProjectID, + "--export.label.location": location, + "--export.label.cluster": cluster, + "--export.credentials-file": filepath.Join(dir, "gcm-sa.json"), + } + }, + ) + require.NoError(t, os.WriteFile(filepath.Join(prom.Dir(), "gcm-sa.json"), p.GCMSA, 0600), "writing GCM JSON failed") + require.NoError(t, e2e.StartAndWaitReady(prom)) + + return promqle2e.NewRunningScrapeReplayBasedBackend( + replayer, + map[string]string{ + "cluster": cluster, + "location": location, + "project_id": creds.ProjectID, + "collector": p.Name, + "job": "test", + }, + v1.NewAPI(cl), + ) +} + +// PrometheusGCMBackend represents a vanilla Prometheus scraping +// metrics and pushing to GCM PRW API for consumption. +// This represents the "unforked" flow which is currently in development. +type PrometheusGCMBackend struct { + Image string + Name string + GCMSA []byte + + // PRW2GCMProxyImage is a docker image containing prw2gcm binary from google/cmd/prw2gcm. + // GCM PRW 2.0 support is in progress. In the meantime, prw2gcm simple, stateless + // proxy can be injected for transparent GCM conversion (thanks to the new PRW 2.0 protocol + // this is now possible). This allows testing future OSS Prometheus compatibility with GCM PRW. + // + // If this option is set, this backend will start prw2gcm sidecar and tell Prometheus to use prw2gcm + // local HTTP endpoint as the Remote Write URL target instead of GCM. prw2gcm will then + // route to GCM. No other changes are done to Prometheus (e.g. we inject GCM auth on Prometheus side). + PRW2GCMProxyImage string +} + +func (p PrometheusGCMBackend) Ref() string { + return p.Name +} + +func (p PrometheusGCMBackend) StartAndWaitReady(t testing.TB, env e2e.Environment) promqle2e.RunningBackend { + t.Helper() + + ctx := t.Context() + creds, err := google.CredentialsFromJSON(ctx, p.GCMSA, gcm.DefaultAuthScopes()...) 
+ require.NoError(t, err, "create credentials from JSON") + + // Fake, does not matter. + cluster := "pe-github-action" + location := "europe-west3-a" + + gcmPromAPI := fmt.Sprintf("https://monitoring.googleapis.com/v1/projects/%s/location/global/prometheus", creds.ProjectID) + cl, err := api.NewClient(api.Config{ + Address: gcmPromAPI, + Client: oauth2.NewClient(ctx, creds.TokenSource), }) - if err := e2e.StartAndWaitReady(prom); err != nil { - t.Fatal(err) + require.NoError(t, err, "create Prometheus client failed") + + remoteWriteURL := gcmPromAPI + "/api/v1/write" + // Inject prw2gcm proxy if requested. + if p.PRW2GCMProxyImage != "" { + prw2gcm := env.Runnable("prw2gcm").WithPorts(map[string]int{"http": 19091}).Init(e2e.StartOptions{ + Command: e2e.NewCommandWithoutEntrypoint("prw2gcm", "--gcm.forward-credentials"), + Image: p.PRW2GCMProxyImage, + }) + require.NoError(t, e2e.StartAndWaitReady(prw2gcm)) + + // In this mode Prometheus talks to prw2gcm, but auth still is initiated on Prometheus. 
+ remoteWriteURL = strings.Replace(remoteWriteURL, "https://monitoring.googleapis.com", prw2gcm.InternalEndpoint("http"), 1) } + replayer := promqle2e.StartIngestByScrapeReplayer(t, env) + prom := newPrometheus( + env, p.Name, p.Image, replayer.Endpoint(env), + func(dir string) string { + return fmt.Sprintf(`remote_write: +- name: gcm + url: %s + google_iam: + credentials_file: %s +`, remoteWriteURL, filepath.Join(dir, "gcm-sa.json")) + }, + nil, + ) + require.NoError(t, os.WriteFile(filepath.Join(prom.Dir(), "gcm-sa.json"), p.GCMSA, 0600), "writing GCM JSON failed") + require.NoError(t, e2e.StartAndWaitReady(prom)) + return promqle2e.NewRunningScrapeReplayBasedBackend( replayer, map[string]string{ diff --git a/google/internal/promqle2etest/gcm_test.go b/google/internal/promqle2etest/gcm_test.go index 7a1ff504af..a4168fc53f 100644 --- a/google/internal/promqle2etest/gcm_test.go +++ b/google/internal/promqle2etest/gcm_test.go @@ -21,7 +21,6 @@ // similar tests with various OpenTelemetry pipelines e.g. target --PromProto--> OpenTelemetry Collector --GCM API--> GCM. //go:build gcme2e -// +build gcme2e package promqle2etest @@ -66,8 +65,8 @@ func gmpPrometheusImageOrFail(t testing.TB) string { } // TODO(bwplotka): Add target --PromProto--> Prometheus vanilla --PRW 2.0--> GCM case once GCM exposes PRW 2.0. -func setupBackends(t testing.TB) (promqle2e.PrometheusBackend, PrometheusForkGCMBackend, LocalExportGCMBackend) { - // target --PromProto--> Prometheus (referencing OSS behaviour). +func setupBackends(t testing.TB) (promqle2e.PrometheusBackend, PrometheusForkGCMBackend, PrometheusGCMBackend, LocalExportGCMBackend) { + // target --PromProto--> OSS Prometheus (referencing OSS behaviour). 
 prom := promqle2e.PrometheusBackend{
 	Name:  "prom",
 	Image: "quay.io/prometheus/prometheus:v3.5.0",
@@ -78,12 +77,19 @@ func setupBackends(t testing.TB) (promqle2e.PrometheusBackend, PrometheusForkGCM
 		Image: gmpPrometheusImageOrFail(t),
 		GCMSA: gcmServiceAccountOrFail(t),
 	}
+	// target --PromProto--> OSS Prometheus --PRW 2.0--> prw2gcm --GCM API--> GCM
+	promWithProxyGCM := PrometheusGCMBackend{
+		Name:              "prom-with-proxy-gcm",
+		Image:             "quay.io/prometheus/prometheus:v3.5.0", // PRW 2.0 development.
+		PRW2GCMProxyImage: gmpPrometheusImageOrFail(t),
+		GCMSA:             gcmServiceAccountOrFail(t),
+	}
 	// local prometheus-engine/pkg/export code --GCM API--> GCM.
 	localExportGCM := LocalExportGCMBackend{
 		Name:  "local-export-gcm",
 		GCMSA: gcmServiceAccountOrFail(t),
 	}
-	return prom, gmpPromGCM, localExportGCM
+	return prom, gmpPromGCM, promWithProxyGCM, localExportGCM
 }
 
 // TestExportGCM_PrometheusCounter_NoCT tests a counter sample behaviour
@@ -91,7 +97,8 @@ func setupBackends(t testing.TB) (promqle2e.PrometheusBackend, PrometheusForkGCM
 func TestExportGCM_PrometheusCounter_NoCT(t *testing.T) {
 	const interval = 15 * time.Second
 
-	prom, gmpPromGCM, localExportGCM := setupBackends(t)
+	// NOTE: Prometheus does not have CT generation (yet), so we can't expect any samples from promWithProxyGCM; skip it.
+	prom, gmpPromGCM, _, localExportGCM := setupBackends(t)
 
 	pt := promqle2e.NewScrapeStyleTest(t)
 	pt.SetCurrentTime(time.Now().Add(-10 * time.Minute)) // We only do a few scrapes, so -10m buffer is enough.
@@ -197,7 +204,7 @@ func TestExportGCM_PrometheusCounter_NoCT(t *testing.T) { func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { const interval = 15 * time.Second - prom, gmpPromGCM, localExportGCM := setupBackends(t) + prom, gmpPromGCM, promWithProxyGCM, localExportGCM := setupBackends(t) pt := promqle2e.NewScrapeStyleTest(t) pt.SetCurrentTime(time.Now().Add(-10 * time.Minute)) // We only do a few scrapes, so -10m buffer is enough. @@ -216,7 +223,8 @@ func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { c = counter.WithLabelValues("bar") c.Add(200) pt.RecordScrape(interval). - Expect(c, 200, prom) + Expect(c, 200, prom). + Expect(c, 210, promWithProxyGCM) // Nothing is expected for GCM due to cannibalization required if the target does not emit CT (which this metric does not). // See https://cloud.google.com/stackdriver/docs/managed-prometheus/troubleshooting#counter-sums // TODO(bwplotka): Target emits CT, but our fork does not yet support it, do this in the next PR. @@ -225,13 +233,15 @@ func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { pt.RecordScrape(interval). Expect(c, 10, localExportGCM). Expect(c, 10, gmpPromGCM). - Expect(c, 210, prom) + Expect(c, 210, prom). + Expect(c, 210, promWithProxyGCM) c.Add(40) pt.RecordScrape(interval). Expect(c, 50, localExportGCM). Expect(c, 50, gmpPromGCM). - Expect(c, 250, prom) + Expect(c, 250, prom). + Expect(c, 250, promWithProxyGCM) // Reset to 0 (simulating instrumentation resetting metric or restarting target). counter.Reset() @@ -239,13 +249,15 @@ func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { pt.RecordScrape(interval). Expect(c, 0, localExportGCM). Expect(c, 0, gmpPromGCM). - Expect(c, 0, prom) + Expect(c, 0, prom). + Expect(c, 0, promWithProxyGCM) c.Add(150) pt.RecordScrape(interval). Expect(c, 150, localExportGCM). Expect(c, 150, gmpPromGCM). - Expect(c, 150, prom) + Expect(c, 150, prom). + Expect(c, 150, promWithProxyGCM) // Reset to 0 with addition. 
counter.Reset() @@ -254,19 +266,22 @@ func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { pt.RecordScrape(interval). Expect(c, 20, localExportGCM). Expect(c, 20, gmpPromGCM). - Expect(c, 20, prom) + Expect(c, 20, prom). + Expect(c, 20, promWithProxyGCM) c.Add(50) pt.RecordScrape(interval). Expect(c, 70, localExportGCM). Expect(c, 70, gmpPromGCM). - Expect(c, 70, prom) + Expect(c, 70, prom). + Expect(c, 70, promWithProxyGCM) c.Add(10) pt.RecordScrape(interval). Expect(c, 80, localExportGCM). Expect(c, 80, gmpPromGCM). - Expect(c, 80, prom) + Expect(c, 80, prom). + Expect(c, 80, promWithProxyGCM) // Tricky reset case, unnoticeable reset for Prometheus without created timestamp as well. counter.Reset() @@ -275,7 +290,8 @@ func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { pt.RecordScrape(interval). Expect(c, 600, localExportGCM). Expect(c, 600, gmpPromGCM). - Expect(c, 600, prom) + Expect(c, 600, prom). + Expect(c, 600, promWithProxyGCM) // Prometheus SDK supports CTs. This "transform" validates that invariance. pt.Transform(func(recordings [][]*dto.MetricFamily) [][]*dto.MetricFamily { @@ -301,7 +317,7 @@ func TestExportGCM_PrometheusCounter_WithCT(t *testing.T) { func TestExportGCM_PrometheusGauge(t *testing.T) { const interval = 15 * time.Second - prom, gmpPromGCM, localExportGCM := setupBackends(t) + prom, gmpPromGCM, promWithProxyGCM, localExportGCM := setupBackends(t) pt := promqle2e.NewScrapeStyleTest(t) pt.SetCurrentTime(time.Now().Add(-10 * time.Minute)) // We only do a few scrapes, so -10m buffer is enough. @@ -322,19 +338,22 @@ func TestExportGCM_PrometheusGauge(t *testing.T) { pt.RecordScrape(interval). Expect(g, 200, localExportGCM). Expect(g, 200, gmpPromGCM). - Expect(g, 200, prom) + Expect(g, 200, prom). + Expect(g, 200, promWithProxyGCM) g.Sub(10) pt.RecordScrape(interval). Expect(g, 190, localExportGCM). Expect(g, 190, gmpPromGCM). - Expect(g, 190, prom) + Expect(g, 190, prom). 
+ Expect(g, 190, promWithProxyGCM) g.Add(40) pt.RecordScrape(interval). Expect(g, 230, localExportGCM). Expect(g, 230, gmpPromGCM). - Expect(g, 230, prom) + Expect(g, 230, prom). + Expect(g, 230, promWithProxyGCM) // Reset to 0 (simulating instrumentation resetting metric or restarting target). gauge.Reset() @@ -342,7 +361,8 @@ func TestExportGCM_PrometheusGauge(t *testing.T) { pt.RecordScrape(interval). Expect(g, 0, localExportGCM). Expect(g, 0, gmpPromGCM). - Expect(g, 0, prom) + Expect(g, 0, prom). + Expect(g, 0, promWithProxyGCM) ctx, cancel := context.WithTimeout(t.Context(), 5*time.Minute) t.Cleanup(cancel) @@ -355,7 +375,7 @@ func TestExportGCM_MetricHelpIngestion(t *testing.T) { mName = "promqle2e_test_gauge_help" ) - _, _, localExportGCM := setupBackends(t) + _, _, _, localExportGCM := setupBackends(t) pt := promqle2e.NewScrapeStyleTest(t) pt.SetCurrentTime(time.Now().Add(-10 * time.Minute)) // We only do a few scrapes, so -10m buffer is enough.