. White space outside of
-// logger names and levels is ignored. The DEFAULT module is specified
-// with the name "DEFAULT".
-//
-// An example specification:
-// `DEFAULT=ERROR; foo.bar=WARNING`
-func ConfigureLoggers(specification string) error {
- return DefaultLoggerCollection.ConfigureLoggers(specification)
-}
-
-// SetLevel will set the current log level for all loggers on the default
-// collection with names that match a provided regular expression. If the
-// regular expression is nil, then all loggers match.
-func SetLevel(re *regexp.Regexp, level LogLevel) {
- DefaultLoggerCollection.SetLevel(re, level)
-}
-
-// SetHandler will set the current log handler for all loggers on the default
-// collection with names that match a provided regular expression. If the
-// regular expression is nil, then all loggers match.
-func SetHandler(re *regexp.Regexp, handler Handler) {
- DefaultLoggerCollection.SetHandler(re, handler)
-}
-
-// SetTextTemplate will set the current text template for all loggers on the
-// default collection with names that match a provided regular expression. If
-// the regular expression is nil, then all loggers match. Note that not every
-// handler is guaranteed to support text templates and a text template will
-// only apply to text-oriented and unstructured handlers.
-func SetTextTemplate(re *regexp.Regexp, t *template.Template) {
- DefaultLoggerCollection.SetTextTemplate(re, t)
-}
-
-// SetTextOutput will set the current output interface for all loggers on the
-// default collection with names that match a provided regular expression. If
-// the regular expression is nil, then all loggers match. Note that not every
-// handler is guaranteed to support text output and a text output interface
-// will only apply to text-oriented and unstructured handlers.
-func SetTextOutput(re *regexp.Regexp, output TextOutput) {
- DefaultLoggerCollection.SetTextOutput(re, output)
-}
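For reference, a minimal sketch of how the package-level helpers deleted above were typically used, based on the signatures visible in this hunk (the specification string and regexp pattern are illustrative):

```go
package main

import (
	"regexp"

	"github.com/spacemonkeygo/spacelog"
)

func main() {
	// Per-logger levels from a specification string; "DEFAULT" names the
	// default module, entries are semicolon-separated.
	if err := spacelog.ConfigureLoggers("DEFAULT=ERROR; foo.bar=WARNING"); err != nil {
		panic(err)
	}

	// A nil regular expression matches every logger in the default collection;
	// otherwise only loggers whose names match are changed.
	spacelog.SetLevel(nil, spacelog.Notice)
	spacelog.SetLevel(regexp.MustCompile(`^foo\.`), spacelog.Debug)
}
```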
diff --git a/vendor/github.com/spacemonkeygo/spacelog/convenience.go b/vendor/github.com/spacemonkeygo/spacelog/convenience.go
deleted file mode 100644
index b3056329..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/convenience.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "fmt"
- "io"
-)
-
-// Trace logs a collection of values if the logger's level is trace or even
-// more permissive.
-func (l *Logger) Trace(v ...interface{}) {
- if l.getLevel() <= Trace {
- l.getHandler().Log(l.name, Trace, fmt.Sprint(v...), 1)
- }
-}
-
-// Tracef logs a format string with values if the logger's level is trace or
-// even more permissive.
-func (l *Logger) Tracef(format string, v ...interface{}) {
- if l.getLevel() <= Trace {
- l.getHandler().Log(l.name, Trace, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Tracee logs an error value if the error is not nil and the logger's level
-// is trace or even more permissive.
-func (l *Logger) Tracee(err error) {
- if l.getLevel() <= Trace && err != nil {
- l.getHandler().Log(l.name, Trace, err.Error(), 1)
- }
-}
-
-// TraceEnabled returns true if the logger's level is trace or even more
-// permissive.
-func (l *Logger) TraceEnabled() bool {
- return l.getLevel() <= Trace
-}
-
-// Debug logs a collection of values if the logger's level is debug or even
-// more permissive.
-func (l *Logger) Debug(v ...interface{}) {
- if l.getLevel() <= Debug {
- l.getHandler().Log(l.name, Debug, fmt.Sprint(v...), 1)
- }
-}
-
-// Debugf logs a format string with values if the logger's level is debug or
-// even more permissive.
-func (l *Logger) Debugf(format string, v ...interface{}) {
- if l.getLevel() <= Debug {
- l.getHandler().Log(l.name, Debug, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Debuge logs an error value if the error is not nil and the logger's level
-// is debug or even more permissive.
-func (l *Logger) Debuge(err error) {
- if l.getLevel() <= Debug && err != nil {
- l.getHandler().Log(l.name, Debug, err.Error(), 1)
- }
-}
-
-// DebugEnabled returns true if the logger's level is debug or even more
-// permissive.
-func (l *Logger) DebugEnabled() bool {
- return l.getLevel() <= Debug
-}
-
-// Info logs a collection of values if the logger's level is info or even
-// more permissive.
-func (l *Logger) Info(v ...interface{}) {
- if l.getLevel() <= Info {
- l.getHandler().Log(l.name, Info, fmt.Sprint(v...), 1)
- }
-}
-
-// Infof logs a format string with values if the logger's level is info or
-// even more permissive.
-func (l *Logger) Infof(format string, v ...interface{}) {
- if l.getLevel() <= Info {
- l.getHandler().Log(l.name, Info, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Infoe logs an error value if the error is not nil and the logger's level
-// is info or even more permissive.
-func (l *Logger) Infoe(err error) {
- if l.getLevel() <= Info && err != nil {
- l.getHandler().Log(l.name, Info, err.Error(), 1)
- }
-}
-
-// InfoEnabled returns true if the logger's level is info or even more
-// permissive.
-func (l *Logger) InfoEnabled() bool {
- return l.getLevel() <= Info
-}
-
-// Notice logs a collection of values if the logger's level is notice or even
-// more permissive.
-func (l *Logger) Notice(v ...interface{}) {
- if l.getLevel() <= Notice {
- l.getHandler().Log(l.name, Notice, fmt.Sprint(v...), 1)
- }
-}
-
-// Noticef logs a format string with values if the logger's level is notice or
-// even more permissive.
-func (l *Logger) Noticef(format string, v ...interface{}) {
- if l.getLevel() <= Notice {
- l.getHandler().Log(l.name, Notice, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Noticee logs an error value if the error is not nil and the logger's level
-// is notice or even more permissive.
-func (l *Logger) Noticee(err error) {
- if l.getLevel() <= Notice && err != nil {
- l.getHandler().Log(l.name, Notice, err.Error(), 1)
- }
-}
-
-// NoticeEnabled returns true if the logger's level is notice or even more
-// permissive.
-func (l *Logger) NoticeEnabled() bool {
- return l.getLevel() <= Notice
-}
-
-// Warn logs a collection of values if the logger's level is warning or even
-// more permissive.
-func (l *Logger) Warn(v ...interface{}) {
- if l.getLevel() <= Warning {
- l.getHandler().Log(l.name, Warning, fmt.Sprint(v...), 1)
- }
-}
-
-// Warnf logs a format string with values if the logger's level is warning or
-// even more permissive.
-func (l *Logger) Warnf(format string, v ...interface{}) {
- if l.getLevel() <= Warning {
- l.getHandler().Log(l.name, Warning, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Warne logs an error value if the error is not nil and the logger's level
-// is warning or even more permissive.
-func (l *Logger) Warne(err error) {
- if l.getLevel() <= Warning && err != nil {
- l.getHandler().Log(l.name, Warning, err.Error(), 1)
- }
-}
-
-// WarnEnabled returns true if the logger's level is warning or even more
-// permissive.
-func (l *Logger) WarnEnabled() bool {
- return l.getLevel() <= Warning
-}
-
-// Error logs a collection of values if the logger's level is error or even
-// more permissive.
-func (l *Logger) Error(v ...interface{}) {
- if l.getLevel() <= Error {
- l.getHandler().Log(l.name, Error, fmt.Sprint(v...), 1)
- }
-}
-
-// Errorf logs a format string with values if the logger's level is error or
-// even more permissive.
-func (l *Logger) Errorf(format string, v ...interface{}) {
- if l.getLevel() <= Error {
- l.getHandler().Log(l.name, Error, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Errore logs an error value if the error is not nil and the logger's level
-// is error or even more permissive.
-func (l *Logger) Errore(err error) {
- if l.getLevel() <= Error && err != nil {
- l.getHandler().Log(l.name, Error, err.Error(), 1)
- }
-}
-
-// ErrorEnabled returns true if the logger's level is error or even more
-// permissive.
-func (l *Logger) ErrorEnabled() bool {
- return l.getLevel() <= Error
-}
-
-// Crit logs a collection of values if the logger's level is critical or even
-// more permissive.
-func (l *Logger) Crit(v ...interface{}) {
- if l.getLevel() <= Critical {
- l.getHandler().Log(l.name, Critical, fmt.Sprint(v...), 1)
- }
-}
-
-// Critf logs a format string with values if the logger's level is critical or
-// even more permissive.
-func (l *Logger) Critf(format string, v ...interface{}) {
- if l.getLevel() <= Critical {
- l.getHandler().Log(l.name, Critical, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Crite logs an error value if the error is not nil and the logger's level
-// is critical or even more permissive.
-func (l *Logger) Crite(err error) {
- if l.getLevel() <= Critical && err != nil {
- l.getHandler().Log(l.name, Critical, err.Error(), 1)
- }
-}
-
-// CritEnabled returns true if the logger's level is critical or even more
-// permissive.
-func (l *Logger) CritEnabled() bool {
- return l.getLevel() <= Critical
-}
-
-// Log logs a collection of values if the logger's level is the provided level
-// or even more permissive.
-func (l *Logger) Log(level LogLevel, v ...interface{}) {
- if l.getLevel() <= level {
- l.getHandler().Log(l.name, level, fmt.Sprint(v...), 1)
- }
-}
-
-// Logf logs a format string with values if the logger's level is the provided
-// level or even more permissive.
-func (l *Logger) Logf(level LogLevel, format string, v ...interface{}) {
- if l.getLevel() <= level {
- l.getHandler().Log(l.name, level, fmt.Sprintf(format, v...), 1)
- }
-}
-
-// Loge logs an error value if the error is not nil and the logger's level
-// is the provided level or even more permissive.
-func (l *Logger) Loge(level LogLevel, err error) {
- if l.getLevel() <= level && err != nil {
- l.getHandler().Log(l.name, level, err.Error(), 1)
- }
-}
-
-// LevelEnabled returns true if the logger's level is the provided level or
-// even more permissive.
-func (l *Logger) LevelEnabled(level LogLevel) bool {
- return l.getLevel() <= level
-}
-
-type writer struct {
- l *Logger
- level LogLevel
-}
-
-func (w *writer) Write(data []byte) (int, error) {
- if w.l.getLevel() <= w.level {
- w.l.getHandler().Log(w.l.name, w.level, string(data), 1)
- }
- return len(data), nil
-}
-
-// Writer returns an io.Writer that writes messages at the given log level.
-func (l *Logger) Writer(level LogLevel) io.Writer {
- return &writer{l: l, level: level}
-}
-
-type writerNoCaller struct {
- l *Logger
- level LogLevel
-}
-
-func (w *writerNoCaller) Write(data []byte) (int, error) {
- if w.l.getLevel() <= w.level {
- w.l.getHandler().Log(w.l.name, w.level, string(data), -1)
- }
- return len(data), nil
-}
-
-// WriterWithoutCaller returns an io.Writer that writes messages at the given
-// log level, but does not attempt to collect the Write caller, and provides
-// no caller information to the log event.
-func (l *Logger) WriterWithoutCaller(level LogLevel) io.Writer {
- return &writerNoCaller{l: l, level: level}
-}
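A brief sketch of the convenience methods and io.Writer adapters defined in the file removed above — the *Enabled checks let callers skip expensive argument construction, and WriterWithoutCaller adapts a Logger for APIs that expect an io.Writer. Logger name and message values are illustrative:

```go
package main

import (
	"log"

	"github.com/spacemonkeygo/spacelog"
)

var logger = spacelog.GetLoggerNamed("example")

func work() error { return nil } // placeholder for real work

func main() {
	logger.Noticef("starting up, pid %d", 1234)

	// Errore only logs when the error is non-nil.
	logger.Errore(work())

	// Guard expensive formatting behind the level check.
	if logger.DebugEnabled() {
		logger.Debugf("state dump: %+v", map[string]int{"requests": 42})
	}

	// Route a stdlib *log.Logger through spacelog at Warning level, without
	// caller information (the stdlib frame would be reported otherwise).
	stdlog := log.New(logger.WriterWithoutCaller(spacelog.Warning), "", 0)
	stdlog.Print("message from the standard library logger")
}
```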
diff --git a/vendor/github.com/spacemonkeygo/spacelog/doc.go b/vendor/github.com/spacemonkeygo/spacelog/doc.go
deleted file mode 100644
index 28c25b4d..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/doc.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package spacelog is a collection of interface lego bricks designed to help you
-build a flexible logging system.
-
-spacelog is loosely inspired by the Python logging library.
-
-The basic interaction is between a Logger and a Handler. A Logger is
-what the programmer typically interacts with for creating log messages. A
-Logger will be at a given log level, and if log messages can clear that
-specific logger's log level filter, they will be passed off to the Handler.
-
-Loggers are instantiated from GetLogger and GetLoggerNamed.
-
-A Handler is a very generic interface for handling log events. You can provide
-your own Handler for doing structured JSON output or colorized output or
-countless other things.
-
-Provided are a simple TextHandler with a variety of log event templates and
-TextOutput sinks, such as io.Writer, Syslog, and so forth.
-
-Make sure to see the source of the setup subpackage for an example of easy and
-configurable logging setup at process start:
- http://godoc.org/github.com/spacemonkeygo/spacelog/setup
-*/
-package spacelog
diff --git a/vendor/github.com/spacemonkeygo/spacelog/event.go b/vendor/github.com/spacemonkeygo/spacelog/event.go
deleted file mode 100644
index da863cbf..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/event.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "path/filepath"
- "strings"
- "time"
-)
-
-// TermColors is a type that knows how to output terminal colors and formatting
-type TermColors struct{}
-
-// LogEvent is a type made by the default text handler for feeding to log
-// templates. It has as much contextual data about the log event as possible.
-type LogEvent struct {
- LoggerName string
- Level LogLevel
- Message string
- Filepath string
- Line int
- Timestamp time.Time
-
- TermColors
-}
-
-// Reset resets the color palette for terminals that support color
-func (TermColors) Reset() string { return "\x1b[0m" }
-func (TermColors) Bold() string { return "\x1b[1m" }
-func (TermColors) Underline() string { return "\x1b[4m" }
-func (TermColors) Black() string { return "\x1b[30m" }
-func (TermColors) Red() string { return "\x1b[31m" }
-func (TermColors) Green() string { return "\x1b[32m" }
-func (TermColors) Yellow() string { return "\x1b[33m" }
-func (TermColors) Blue() string { return "\x1b[34m" }
-func (TermColors) Magenta() string { return "\x1b[35m" }
-func (TermColors) Cyan() string { return "\x1b[36m" }
-func (TermColors) White() string { return "\x1b[37m" }
-
-func (l *LogEvent) Filename() string {
- if l.Filepath == "" {
- return ""
- }
- return filepath.Base(l.Filepath)
-}
-
-func (l *LogEvent) Time() string {
- return l.Timestamp.Format("15:04:05")
-}
-
-func (l *LogEvent) Date() string {
- return l.Timestamp.Format("2006/01/02")
-}
-
-// LevelJustified returns the log level in string form justified so that all
-// log levels take the same text width.
-func (l *LogEvent) LevelJustified() (rv string) {
- rv = l.Level.String()
- if len(rv) < 5 {
- rv += strings.Repeat(" ", 5-len(rv))
- }
- return rv
-}
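The LogEvent fields and TermColors helpers above are what text templates render. A sketch of a custom format template installed on the default collection — the exact format string is an assumption for illustration; the referenced fields come from the deleted file:

```go
package main

import (
	"text/template"

	"github.com/spacemonkeygo/spacelog"
)

func main() {
	// .Date, .Time, .LevelJustified, .Filename and .Line are LogEvent
	// methods/fields; .Bold and .Reset come from the embedded TermColors.
	custom := template.Must(template.New("custom").Parse(
		`{{.Date}} {{.Time}} {{.LevelJustified}} ` +
			`{{.Bold}}{{.LoggerName}}{{.Reset}} ` +
			`{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}- {{.Message}}`))

	// nil applies the template to every logger; it only affects text handlers.
	spacelog.SetTextTemplate(nil, custom)
}
```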
diff --git a/vendor/github.com/spacemonkeygo/spacelog/handler.go b/vendor/github.com/spacemonkeygo/spacelog/handler.go
deleted file mode 100644
index e3db0865..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/handler.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "text/template"
-)
-
-// Handler is an interface that knows how to process log events. This is the
-// basic interface type for building a logging system. If you want to route
-// structured log data somewhere, you would implement this interface.
-type Handler interface {
- // Log is called for every message. if calldepth is negative, caller
- // information is missing
- Log(logger_name string, level LogLevel, msg string, calldepth int)
-
- // These two calls are expected to be no-ops on non-text-output handlers
- SetTextTemplate(t *template.Template)
- SetTextOutput(output TextOutput)
-}
-
-// HandlerFunc is a type to make implementation of the Handler interface easier
-type HandlerFunc func(logger_name string, level LogLevel, msg string,
- calldepth int)
-
-// Log simply calls f(logger_name, level, msg, calldepth)
-func (f HandlerFunc) Log(logger_name string, level LogLevel, msg string,
- calldepth int) {
- f(logger_name, level, msg, calldepth)
-}
-
-// SetTextTemplate is a no-op
-func (HandlerFunc) SetTextTemplate(t *template.Template) {}
-
-// SetTextOutput is a no-op
-func (HandlerFunc) SetTextOutput(output TextOutput) {}
-
-var (
- defaultHandler = NewTextHandler(StdlibTemplate,
- &StdlibOutput{})
-)
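As a sketch of how the Handler interface above could be satisfied, here is a hypothetical HandlerFunc that forwards events to a custom sink; collectEvent is invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/spacemonkeygo/spacelog"
)

// collectEvent stands in for whatever sink you actually have
// (a metrics pipeline, a structured logger, a test recorder, ...).
func collectEvent(name string, level spacelog.LogLevel, msg string) {
	fmt.Printf("%s %s %s\n", level, name, msg)
}

func main() {
	// HandlerFunc adapts a plain function to the Handler interface;
	// its SetTextTemplate/SetTextOutput methods are no-ops.
	h := spacelog.HandlerFunc(func(name string, level spacelog.LogLevel,
		msg string, calldepth int) {
		// A negative calldepth means caller information is unavailable.
		collectEvent(name, level, msg)
	})

	// Install the handler on every logger in the default collection.
	spacelog.SetHandler(nil, h)
}
```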
diff --git a/vendor/github.com/spacemonkeygo/spacelog/level.go b/vendor/github.com/spacemonkeygo/spacelog/level.go
deleted file mode 100644
index bf507075..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/level.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-type LogLevel int32
-
-const (
- Trace LogLevel = 5
- Debug LogLevel = 10
- Info LogLevel = 20
- Notice LogLevel = 30
- Warning LogLevel = 40
- Error LogLevel = 50
- Critical LogLevel = 60
- // syslog has Alert
- // syslog has Emerg
-
- DefaultLevel = Notice
-)
-
-// String returns the log level name in short form
-func (l LogLevel) String() string {
- switch l.Match() {
- case Critical:
- return "CRIT"
- case Error:
- return "ERR"
- case Warning:
- return "WARN"
- case Notice:
- return "NOTE"
- case Info:
- return "INFO"
- case Debug:
- return "DEBUG"
- case Trace:
- return "TRACE"
- default:
- return "UNSET"
- }
-}
-
-// String returns the log level name in long human readable form
-func (l LogLevel) Name() string {
- switch l.Match() {
- case Critical:
- return "critical"
- case Error:
- return "error"
- case Warning:
- return "warning"
- case Notice:
- return "notice"
- case Info:
- return "info"
- case Debug:
- return "debug"
- case Trace:
- return "trace"
- default:
- return "unset"
- }
-}
-
-// Match returns the greatest named log level that is less than or equal to
-// the receiver log level. For example, if the log level is 43, Match() will
-// return 40 (Warning)
-func (l LogLevel) Match() LogLevel {
- if l >= Critical {
- return Critical
- }
- if l >= Error {
- return Error
- }
- if l >= Warning {
- return Warning
- }
- if l >= Notice {
- return Notice
- }
- if l >= Info {
- return Info
- }
- if l >= Debug {
- return Debug
- }
- if l >= Trace {
- return Trace
- }
- return 0
-}
-
-// LevelFromString will convert a named log level to its corresponding value
-// type, or error if both the name was unknown and an integer value was unable
-// to be parsed.
-func LevelFromString(str string) (LogLevel, error) {
- switch strings.ToLower(str) {
- case "crit", "critical":
- return Critical, nil
- case "err", "error":
- return Error, nil
- case "warn", "warning":
- return Warning, nil
- case "note", "notice":
- return Notice, nil
- case "info":
- return Info, nil
- case "debug":
- return Debug, nil
- case "trace":
- return Trace, nil
- }
- val, err := strconv.ParseInt(str, 10, 32)
- if err == nil {
- return LogLevel(val), nil
- }
- return 0, fmt.Errorf("Invalid log level: %s", str)
-}
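The level parsing and matching rules in the file above can be exercised directly; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/spacemonkeygo/spacelog"
)

func main() {
	// Named levels parse case-insensitively; bare integers are accepted too.
	lvl, err := spacelog.LevelFromString("warn")
	if err != nil {
		panic(err)
	}
	fmt.Println(lvl.Name()) // "warning"

	// Match snaps an arbitrary numeric level down to the nearest named one:
	// 43 sits between Warning (40) and Error (50), so it matches Warning.
	fmt.Println(spacelog.LogLevel(43).Match() == spacelog.Warning) // true
	fmt.Println(spacelog.LogLevel(43).String())                    // "WARN"
}
```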
diff --git a/vendor/github.com/spacemonkeygo/spacelog/logger.go b/vendor/github.com/spacemonkeygo/spacelog/logger.go
deleted file mode 100644
index ae1734b2..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/logger.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// Logger is the basic type that allows for logging. A logger has an associated
-// name, given to it during construction, either through a logger collection,
-// GetLogger, GetLoggerNamed, or another Logger's Scope method. A logger also
-// has an associated level and handler, typically configured through the logger
-// collection to which it belongs.
-type Logger struct {
- level LogLevel
- name string
- collection *LoggerCollection
-
- handler_mtx sync.RWMutex
- handler Handler
-}
-
-// Scope returns a new Logger with the same level and handler, using the
-// receiver Logger's name as a prefix.
-func (l *Logger) Scope(name string) *Logger {
- return l.collection.getLogger(l.name+"."+name, l.getLevel(),
- l.getHandler())
-}
-
-func (l *Logger) setLevel(level LogLevel) {
- atomic.StoreInt32((*int32)(&l.level), int32(level))
-}
-
-func (l *Logger) getLevel() LogLevel {
- return LogLevel(atomic.LoadInt32((*int32)(&l.level)))
-}
-
-func (l *Logger) setHandler(handler Handler) {
- l.handler_mtx.Lock()
- defer l.handler_mtx.Unlock()
- l.handler = handler
-}
-
-func (l *Logger) getHandler() Handler {
- l.handler_mtx.RLock()
- defer l.handler_mtx.RUnlock()
- return l.handler
-}
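A short sketch of how Scope, defined above, derives child loggers (the logger names are illustrative):

```go
package main

import "github.com/spacemonkeygo/spacelog"

var (
	base = spacelog.GetLoggerNamed("myservice")

	// Scope produces "myservice.db" and "myservice.http", inheriting the
	// parent's current level and handler at the time of the call.
	dbLog   = base.Scope("db")
	httpLog = base.Scope("http")
)

func main() {
	dbLog.Notice("connected")
	httpLog.Warnf("slow request: %dms", 1500)
}
```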
diff --git a/vendor/github.com/spacemonkeygo/spacelog/output.go b/vendor/github.com/spacemonkeygo/spacelog/output.go
deleted file mode 100644
index 8751268f..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/output.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "bytes"
- "fmt"
- "io"
- "log"
- "os"
- "sync"
-)
-
-type TextOutput interface {
- Output(LogLevel, []byte)
-}
-
-// WriterOutput is an io.Writer wrapper that matches the TextOutput interface
-type WriterOutput struct {
- w io.Writer
-}
-
-// NewWriterOutput returns a TextOutput that writes messages to an io.Writer
-func NewWriterOutput(w io.Writer) *WriterOutput {
- return &WriterOutput{w: w}
-}
-
-func (o *WriterOutput) Output(_ LogLevel, message []byte) {
- o.w.Write(append(bytes.TrimRight(message, "\r\n"), platformNewline...))
-}
-
-// StdlibOutput is a TextOutput that simply writes to the default Go stdlib
-// logging system. It is the default. If you configure the Go stdlib to write
-// to spacelog, make sure to provide a new TextOutput to your logging
-// collection
-type StdlibOutput struct{}
-
-func (*StdlibOutput) Output(_ LogLevel, message []byte) {
- log.Print(string(message))
-}
-
-type bufferMsg struct {
- level LogLevel
- message []byte
-}
-
-// BufferedOutput uses a channel to synchronize writes to a wrapped TextOutput
-// and allows for buffering a limited amount of log events.
-type BufferedOutput struct {
- o TextOutput
- c chan bufferMsg
- running sync.Mutex
- close_once sync.Once
-}
-
-// NewBufferedOutput returns a BufferedOutput wrapping output with a buffer
-// size of buffer.
-func NewBufferedOutput(output TextOutput, buffer int) *BufferedOutput {
- if buffer < 0 {
- buffer = 0
- }
- b := &BufferedOutput{
- o: output,
- c: make(chan bufferMsg, buffer)}
- go b.process()
- return b
-}
-
-// Close shuts down the BufferedOutput's processing
-func (b *BufferedOutput) Close() {
- b.close_once.Do(func() {
- close(b.c)
- })
- b.running.Lock()
- b.running.Unlock()
-}
-
-func (b *BufferedOutput) Output(level LogLevel, message []byte) {
- b.c <- bufferMsg{level: level, message: message}
-}
-
-func (b *BufferedOutput) process() {
- b.running.Lock()
- defer b.running.Unlock()
- for {
- msg, open := <-b.c
- if !open {
- break
- }
- b.o.Output(msg.level, msg.message)
- }
-}
-
-// A TextOutput object that also implements HupHandlingTextOutput may have its
-// OnHup() method called when an administrative signal is sent to this process.
-type HupHandlingTextOutput interface {
- TextOutput
- OnHup()
-}
-
-// FileWriterOutput is like WriterOutput with a plain file handle, but it
-// knows how to reopen the file (or try to reopen it) if it hasn't been able
-// to open the file previously, or if an appropriate signal has been received.
-type FileWriterOutput struct {
- *WriterOutput
- path string
-}
-
-// Creates a new FileWriterOutput object. This is the only case where an
-// error opening the file will be reported to the caller; if we try to
-// reopen it later and the reopen fails, we'll just keep trying until it
-// works.
-func NewFileWriterOutput(path string) (*FileWriterOutput, error) {
- fo := &FileWriterOutput{path: path}
- fh, err := fo.openFile()
- if err != nil {
- return nil, err
- }
- fo.WriterOutput = NewWriterOutput(fh)
- return fo, nil
-}
-
-// Try to open the file with the path associated with this object.
-func (fo *FileWriterOutput) openFile() (*os.File, error) {
- return os.OpenFile(fo.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
-}
-
-// Try to communicate a message without using our log file. In all likelihood,
-// stderr is closed or redirected to /dev/null, but at least we can try
-// writing there. In the very worst case, if an admin attaches a ptrace to
-// this process, it will be more clear what the problem is.
-func (fo *FileWriterOutput) fallbackLog(tmpl string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, tmpl, args...)
-}
-
-// Output a log line by writing it to the file. If the file has been
-// released, try to open it again. If that fails, cry for a little
-// while, then throw away the message and carry on.
-func (fo *FileWriterOutput) Output(ll LogLevel, message []byte) {
- if fo.WriterOutput == nil {
- fh, err := fo.openFile()
- if err != nil {
- fo.fallbackLog("Could not open %#v: %s", fo.path, err)
- return
- }
- fo.WriterOutput = NewWriterOutput(fh)
- }
- fo.WriterOutput.Output(ll, message)
-}
-
-// Throw away any references/handles to the output file. This probably
-// means the admin wants to rotate the file out and have this process
-// open a new one. Close the underlying io.Writer if that is a thing
-// that it knows how to do.
-func (fo *FileWriterOutput) OnHup() {
- if fo.WriterOutput != nil {
- wc, ok := fo.WriterOutput.w.(io.Closer)
- if ok {
- err := wc.Close()
- if err != nil {
- fo.fallbackLog("Closing %#v failed: %s", fo.path, err)
- }
- }
- fo.WriterOutput = nil
- }
-}
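A sketch of composing the output types above — a file-backed output wrapped in a buffer — assuming a writable log path (the path and buffer size are illustrative):

```go
package main

import (
	"github.com/spacemonkeygo/spacelog"
)

func main() {
	// Only this initial open reports an error; later reopen attempts
	// (for example after OnHup) simply retry until they succeed.
	fileOut, err := spacelog.NewFileWriterOutput("/var/log/myservice.log")
	if err != nil {
		panic(err)
	}

	// Buffer up to 1024 events and write them from a background goroutine.
	buffered := spacelog.NewBufferedOutput(fileOut, 1024)
	defer buffered.Close()

	// Point every text-oriented handler at the buffered output.
	spacelog.SetTextOutput(nil, buffered)
}
```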
diff --git a/vendor/github.com/spacemonkeygo/spacelog/output_other.go b/vendor/github.com/spacemonkeygo/spacelog/output_other.go
deleted file mode 100644
index 2be240a1..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/output_other.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package spacelog
-
-var platformNewline = []byte("\n")
diff --git a/vendor/github.com/spacemonkeygo/spacelog/output_windows.go b/vendor/github.com/spacemonkeygo/spacelog/output_windows.go
deleted file mode 100644
index 58b71dab..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/output_windows.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-var platformNewline = []byte("\r\n")
diff --git a/vendor/github.com/spacemonkeygo/spacelog/setup.go b/vendor/github.com/spacemonkeygo/spacelog/setup.go
deleted file mode 100644
index 2c1cbcee..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/setup.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "bytes"
- "fmt"
- "log"
- "math"
- "os"
- "os/signal"
- "regexp"
- "strings"
- "text/template"
-)
-
-// SetupConfig is a configuration struct meant to be used with
-// github.com/spacemonkeygo/flagfile/utils.Setup
-// but can be used independently.
-type SetupConfig struct {
- Output string `default:"stderr" usage:"log output. can be stdout, stderr, syslog, or a path"`
- Level string `default:"" usage:"base logger level"`
- Filter string `default:"" usage:"sets loggers matching this regular expression to the lowest level"`
- Format string `default:"" usage:"format string to use"`
- Stdlevel string `default:"warn" usage:"logger level for stdlib log integration"`
- Subproc string `default:"" usage:"process to run for stdout/stderr-captured logging. The command is first processed as a Go template that supports {{.Facility}}, {{.Level}}, and {{.Name}} fields, and then passed to sh. If set, will redirect stdout and stderr to the given process. A good default is 'setsid logger --priority {{.Facility}}.{{.Level}} --tag {{.Name}}'"`
- Buffer int `default:"0" usage:"the number of messages to buffer. 0 for no buffer"`
- // Facility defaults to syslog.LOG_USER (which is 8)
- Facility int `default:"8" usage:"the syslog facility to use if syslog output is configured"`
- HupRotate bool `default:"false" usage:"if true, sending a HUP signal will reopen log files"`
- Config string `default:"" usage:"a semicolon separated list of logger=level; sets each log to the corresponding level"`
-}
-
-var (
- stdlog = GetLoggerNamed("stdlog")
- funcmap = template.FuncMap{"ColorizeLevel": ColorizeLevel}
-)
-
-// SetFormatMethod adds functions to the template function map, such that
-// command-line and Setup provided templates can call methods added to the map
-// via this method. The map comes prepopulated with ColorizeLevel, but can be
-// overridden. SetFormatMethod should be called (if at all) before one of
-// this package's Setup methods.
-func SetFormatMethod(name string, fn interface{}) {
- funcmap[name] = fn
-}
-
-// MustSetup is the same as Setup, but panics instead of returning an error
-func MustSetup(procname string, config SetupConfig) {
- err := Setup(procname, config)
- if err != nil {
- panic(err)
- }
-}
-
-type subprocInfo struct {
- Facility string
- Level string
- Name string
-}
-
-// Setup takes a given procname and sets spacelog up with the given
-// configuration. Setup supports:
-// * capturing stdout and stderr to a subprocess
-// * configuring the default level
-// * configuring log filters (enabling only some loggers)
-// * configuring the logging template
-// * configuring the output (a file, syslog, stdout, stderr)
-// * configuring log event buffering
-// * capturing all standard library logging with configurable log level
-// It is expected that this method will be called once at process start.
-func Setup(procname string, config SetupConfig) error {
- if config.Subproc != "" {
- t, err := template.New("subproc").Parse(config.Subproc)
- if err != nil {
- return err
- }
- var buf bytes.Buffer
- err = t.Execute(&buf, &subprocInfo{
- Facility: fmt.Sprintf("%d", config.Facility),
- Level: fmt.Sprintf("%d", 2), // syslog.LOG_CRIT
- Name: procname})
- if err != nil {
- return err
- }
- err = CaptureOutputToProcess("sh", "-c", string(buf.Bytes()))
- if err != nil {
- return err
- }
- }
- if config.Config != "" {
- err := ConfigureLoggers(config.Config)
- if err != nil {
- return err
- }
- }
- if config.Level != "" {
- level_val, err := LevelFromString(config.Level)
- if err != nil {
- return err
- }
- if level_val != DefaultLevel {
- SetLevel(nil, level_val)
- }
- }
- if config.Filter != "" {
- re, err := regexp.Compile(config.Filter)
- if err != nil {
- return err
- }
- SetLevel(re, LogLevel(math.MinInt32))
- }
- var t *template.Template
- if config.Format != "" {
- var err error
- t, err = template.New("user").Funcs(funcmap).Parse(config.Format)
- if err != nil {
- return err
- }
- }
- var textout TextOutput
- switch strings.ToLower(config.Output) {
- case "syslog":
- w, err := NewSyslogOutput(SyslogPriority(config.Facility), procname)
- if err != nil {
- return err
- }
- if t == nil {
- t = SyslogTemplate
- }
- textout = w
- case "stdout":
- if t == nil {
- t = DefaultTemplate
- }
- textout = NewWriterOutput(os.Stdout)
- case "stderr", "":
- if t == nil {
- t = DefaultTemplate
- }
- textout = NewWriterOutput(os.Stderr)
- default:
- if t == nil {
- t = StandardTemplate
- }
- var err error
- textout, err = NewFileWriterOutput(config.Output)
- if err != nil {
- return err
- }
- }
- if config.HupRotate {
- if hh, ok := textout.(HupHandlingTextOutput); ok {
- sigchan := make(chan os.Signal)
- signal.Notify(sigchan, sigHUP)
- go func() {
- for _ = range sigchan {
- hh.OnHup()
- }
- }()
- }
- }
- if config.Buffer > 0 {
- textout = NewBufferedOutput(textout, config.Buffer)
- }
- SetHandler(nil, NewTextHandler(t, textout))
- log.SetFlags(log.Lshortfile)
- if config.Stdlevel == "" {
- config.Stdlevel = "warn"
- }
- stdlog_level_val, err := LevelFromString(config.Stdlevel)
- if err != nil {
- return err
- }
- log.SetOutput(stdlog.WriterWithoutCaller(stdlog_level_val))
- return nil
-}
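A hedged sketch of driving the Setup path above from code rather than flags; the field values are illustrative, the field names come from SetupConfig as deleted here:

```go
package main

import "github.com/spacemonkeygo/spacelog"

func main() {
	// MustSetup panics on error; Setup returns the error instead.
	spacelog.MustSetup("myservice", spacelog.SetupConfig{
		Output:    "/var/log/myservice.log", // stdout, stderr, syslog, or a path
		Level:     "info",                   // base level for all loggers
		Config:    "db=debug; http=warn",    // per-logger overrides
		Buffer:    512,                      // buffered output
		HupRotate: true,                     // reopen the log file on SIGHUP
	})
}
```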
diff --git a/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go b/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go
deleted file mode 100644
index c12ed961..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/sighup_appengine.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (C) 2017 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build appengine
-
-package spacelog
-
-import (
- "strconv"
-)
-
-const (
- sigHUP = syscallSignal(0x1)
-)
-
-type syscallSignal int
-
-func (s syscallSignal) Signal() {}
-
-func (s syscallSignal) String() string {
- switch s {
- case sigHUP:
- return "hangup"
- }
- return "signal " + strconv.Itoa(int(s))
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go b/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go
deleted file mode 100644
index 0e033a8d..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/sighup_other.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2017 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !appengine
-
-package spacelog
-
-import "syscall"
-
-const (
- sigHUP = syscall.SIGHUP
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/syslog.go b/vendor/github.com/spacemonkeygo/spacelog/syslog.go
deleted file mode 100644
index c2317b6c..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/syslog.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package spacelog
-
-import (
- "bytes"
- "log/syslog"
-)
-
-type SyslogPriority syslog.Priority
-
-// SyslogOutput is a syslog client that matches the TextOutput interface
-type SyslogOutput struct {
- w *syslog.Writer
-}
-
-// NewSyslogOutput returns a TextOutput object that writes to syslog using
-// the given facility and tag. The log level will be determined by the log
-// event.
-func NewSyslogOutput(facility SyslogPriority, tag string) (
- TextOutput, error) {
- w, err := syslog.New(syslog.Priority(facility), tag)
- if err != nil {
- return nil, err
- }
- return &SyslogOutput{w: w}, nil
-}
-
-func (o *SyslogOutput) Output(level LogLevel, message []byte) {
- level = level.Match()
- for _, msg := range bytes.Split(message, []byte{'\n'}) {
- switch level {
- case Critical:
- o.w.Crit(string(msg))
- case Error:
- o.w.Err(string(msg))
- case Warning:
- o.w.Warning(string(msg))
- case Notice:
- o.w.Notice(string(msg))
- case Info:
- o.w.Info(string(msg))
- case Debug:
- fallthrough
- case Trace:
- fallthrough
- default:
- o.w.Debug(string(msg))
- }
- }
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go b/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go
deleted file mode 100644
index edba3c2a..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/syslog_windows.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "fmt"
-)
-
-type SyslogPriority int
-
-func NewSyslogOutput(facility SyslogPriority, tag string) (
- TextOutput, error) {
- return nil, fmt.Errorf("SyslogOutput not supported on Windows")
-}
diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates.go b/vendor/github.com/spacemonkeygo/spacelog/templates.go
deleted file mode 100644
index 959033da..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/templates.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "text/template"
-)
-
-// ColorizeLevel returns a TermColor byte sequence for the appropriate color
-// for the level. If you'd like to configure your own color choices, you can
-// make your own template with its own function map to your own colorize
-// function.
-func ColorizeLevel(level LogLevel) string {
- switch level.Match() {
- case Critical, Error:
- return TermColors{}.Red()
- case Warning:
- return TermColors{}.Magenta()
- case Notice:
- return TermColors{}.Yellow()
- case Info, Debug, Trace:
- return TermColors{}.Green()
- }
- return ""
-}
-
-var (
- // ColorTemplate uses the default ColorizeLevel method for color choices.
- ColorTemplate = template.Must(template.New("color").Funcs(template.FuncMap{
- "ColorizeLevel": ColorizeLevel}).Parse(
- `{{.Blue}}{{.Date}} {{.Time}}{{.Reset}} ` +
- `{{.Bold}}{{ColorizeLevel .Level}}{{.LevelJustified}}{{.Reset}} ` +
- `{{.Underline}}{{.LoggerName}}{{.Reset}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}- ` +
- `{{ColorizeLevel .Level}}{{.Message}}{{.Reset}}`))
-
- // StandardTemplate is like ColorTemplate with no color.
- StandardTemplate = template.Must(template.New("standard").Parse(
- `{{.Date}} {{.Time}} ` +
- `{{.Level}} {{.LoggerName}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
- `- {{.Message}}`))
-
- // SyslogTemplate is missing the date and time as syslog adds those
- // things.
- SyslogTemplate = template.Must(template.New("syslog").Parse(
- `{{.Level}} {{.LoggerName}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
- `- {{.Message}}`))
-
- // StdlibTemplate is missing the date and time as the stdlib logger often
- // adds those things.
- StdlibTemplate = template.Must(template.New("stdlib").Parse(
- `{{.Level}} {{.LoggerName}} ` +
- `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
- `- {{.Message}}`))
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates_others.go b/vendor/github.com/spacemonkeygo/spacelog/templates_others.go
deleted file mode 100644
index 114e2e14..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/templates_others.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package spacelog
-
-var (
- // DefaultTemplate is default template for stdout/stderr for the platform
- DefaultTemplate = ColorTemplate
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go b/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go
deleted file mode 100644
index 512b6004..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/templates_windows.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-var (
- // DefaultTemplate is default template for stdout/stderr for the platform
- DefaultTemplate = StandardTemplate
-)
diff --git a/vendor/github.com/spacemonkeygo/spacelog/text.go b/vendor/github.com/spacemonkeygo/spacelog/text.go
deleted file mode 100644
index 8b36ce99..00000000
--- a/vendor/github.com/spacemonkeygo/spacelog/text.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (C) 2014 Space Monkey, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spacelog
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strings"
- "sync"
- "text/template"
- "time"
-)
-
-// TextHandler is the default implementation of the Handler interface. A
-// TextHandler, on log events, makes LogEvent structures, passes them to the
-// configured template, and then passes that output to a configured TextOutput
-// interface.
-type TextHandler struct {
- mtx sync.RWMutex
- template *template.Template
- output TextOutput
-}
-
-// NewTextHandler creates a Handler that creates LogEvents, passes them to
-// the given template, and passes the result to output
-func NewTextHandler(t *template.Template, output TextOutput) *TextHandler {
- return &TextHandler{template: t, output: output}
-}
-
-// Log makes a LogEvent, formats it with the configured template, then passes
-// the output to configured output sink
-func (h *TextHandler) Log(logger_name string, level LogLevel, msg string,
- calldepth int) {
- h.mtx.RLock()
- output, template := h.output, h.template
- h.mtx.RUnlock()
- event := LogEvent{
- LoggerName: logger_name,
- Level: level,
- Message: strings.TrimRight(msg, "\n\r"),
- Timestamp: time.Now()}
- if calldepth >= 0 {
- _, event.Filepath, event.Line, _ = runtime.Caller(calldepth + 1)
- }
- var buf bytes.Buffer
- err := template.Execute(&buf, &event)
- if err != nil {
- output.Output(level, []byte(
- fmt.Sprintf("log format template failed: %s", err)))
- return
- }
- output.Output(level, buf.Bytes())
-}
-
-// SetTextTemplate changes the TextHandler's text formatting template
-func (h *TextHandler) SetTextTemplate(t *template.Template) {
- h.mtx.Lock()
- defer h.mtx.Unlock()
- h.template = t
-}
-
-// SetTextOutput changes the TextHandler's TextOutput sink
-func (h *TextHandler) SetTextOutput(output TextOutput) {
- h.mtx.Lock()
- defer h.mtx.Unlock()
- h.output = output
-}
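Tying the pieces together, a minimal sketch of building the default text pipeline by hand with the TextHandler above (logger name and message are illustrative):

```go
package main

import (
	"os"

	"github.com/spacemonkeygo/spacelog"
)

func main() {
	// StandardTemplate is the plain (non-color) event template; the output
	// writes each rendered event to stderr with a platform newline.
	handler := spacelog.NewTextHandler(
		spacelog.StandardTemplate,
		spacelog.NewWriterOutput(os.Stderr))

	// Install it on all loggers in the default collection.
	spacelog.SetHandler(nil, handler)

	spacelog.GetLoggerNamed("example").Notice("hello from a hand-built handler")
}
```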
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index fa1245b1..2924cf3a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,7 +8,6 @@ import (
"fmt"
"math"
"os"
- "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -141,12 +140,11 @@ func CallerInfo() []string {
}
parts := strings.Split(file, "/")
- file = parts[len(parts)-1]
if len(parts) > 1 {
+ filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- path, _ := filepath.Abs(file)
- callers = append(callers, fmt.Sprintf("%s:%d", path, line))
+ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}
@@ -530,7 +528,7 @@ func isNil(object interface{}) bool {
[]reflect.Kind{
reflect.Chan, reflect.Func,
reflect.Interface, reflect.Map,
- reflect.Ptr, reflect.Slice},
+ reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)
if isNilableKind && value.IsNil() {
@@ -818,49 +816,44 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true // we consider nil to be equal to the nil set
}
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
+ subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
- listValue := reflect.ValueOf(list)
- subsetKeys := subsetValue.MapKeys()
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
- for i := 0; i < len(subsetKeys); i++ {
- subsetKey := subsetKeys[i]
- subsetElement := subsetValue.MapIndex(subsetKey).Interface()
- listElement := listValue.MapIndex(subsetKey).Interface()
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
- if !ObjectsAreEqual(subsetElement, listElement) {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
}
return true
}
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
+ subsetList := reflect.ValueOf(subset)
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
}
}
@@ -879,34 +872,28 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
+ subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
- listValue := reflect.ValueOf(list)
- subsetKeys := subsetValue.MapKeys()
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
- for i := 0; i < len(subsetKeys); i++ {
- subsetKey := subsetKeys[i]
- subsetElement := subsetValue.MapIndex(subsetKey).Interface()
- listElement := listValue.MapIndex(subsetKey).Interface()
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
- if !ObjectsAreEqual(subsetElement, listElement) {
+ if !av.IsValid() {
+ return true
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return true
}
}
@@ -914,8 +901,9 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
}
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
+ subsetList := reflect.ValueOf(subset)
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
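The rewritten Subset/NotSubset above validates map keys explicitly instead of relying on a recovered panic; a small sketch of the map-on-map behaviour this enables (test name and data are illustrative):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetMaps(t *testing.T) {
	actual := map[string]int{"a": 1, "b": 2, "c": 3}

	// Every key in the subset must exist in the actual map with an equal value.
	assert.Subset(t, actual, map[string]int{"a": 1, "c": 3})

	// A missing key (or differing value) now fails with a
	// "%#v does not contain %#v" message rather than panicking internally.
	assert.NotSubset(t, actual, map[string]int{"z": 9})
}
```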
diff --git a/vendor/go.uber.org/fx/CHANGELOG.md b/vendor/go.uber.org/fx/CHANGELOG.md
index 208cb39e..9f58ac17 100644
--- a/vendor/go.uber.org/fx/CHANGELOG.md
+++ b/vendor/go.uber.org/fx/CHANGELOG.md
@@ -10,6 +10,11 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+## [1.19.2](https://github.com/uber-go/fx/compare/v1.19.1...v1.19.2) - 2023-02-21
+### Changed
+- Upgrade Dig dependency to v1.16.1.
+
+
## [1.19.1](https://github.com/uber-go/fx/compare/v1.18.0...v1.19.1) - 2023-01-10
### Changed
- Calling `fx.Stop()` after the `App` has already stopped no longer errors out.
diff --git a/vendor/go.uber.org/fx/version.go b/vendor/go.uber.org/fx/version.go
index 486815e6..971d8b95 100644
--- a/vendor/go.uber.org/fx/version.go
+++ b/vendor/go.uber.org/fx/version.go
@@ -21,4 +21,4 @@
package fx
// Version is exported for runtime compatibility checks.
-const Version = "1.19.1"
+const Version = "1.19.2"
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index d2c8aada..f8177b97 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,21 @@
Releases
========
+v1.11.0 (2023-03-28)
+====================
+- `Errors` now supports any error that implements multiple-error
+ interface.
+- Add `Every` function to allow checking if all errors in the chain
+ satisfies `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
v1.9.0 (2022-12-12)
===================
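The v1.11.0 entry above adds `Every` and teaches `Errors` to unwrap any error exposing the Go 1.20 multiple-error interface. A small sketch of the `Every` semantics, assuming the vendored `go.uber.org/multierr`:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errRetryable = errors.New("retryable")

func main() {
	err := multierr.Combine(
		fmt.Errorf("dial: %w", errRetryable),
		fmt.Errorf("read: %w", errRetryable),
	)

	// errors.Is succeeds if any wrapped error matches; Every requires all of them to.
	fmt.Println(errors.Is(err, errRetryable))      // true
	fmt.Println(multierr.Every(err, errRetryable)) // true

	err = multierr.Append(err, errors.New("unrelated"))
	fmt.Println(multierr.Every(err, errRetryable)) // false
}
```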
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 70aacecd..5ab6ac40 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
## Status
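The README section above mentions safely appending into an error from a `defer` statement; a minimal sketch of that pattern with the vendored `multierr.Append`:

```go
package fileio

import (
	"os"

	"go.uber.org/multierr"
)

// writeFile returns both the Write error and the Close error, if any,
// combined into a single error value.
func writeFile(path string, data []byte) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		// Append leaves err unchanged when f.Close() returns nil.
		err = multierr.Append(err, f.Close())
	}()

	_, err = f.Write(data)
	return err
}
```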
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index cdd91ae5..3a828b2d 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -147,8 +147,7 @@ import (
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -196,23 +195,7 @@ type errorGroup interface {
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- return append(([]error)(nil), eg.Errors()...)
+ return extractErrors(err)
}
// multiError is an error that holds one or more errors.
@@ -227,8 +210,6 @@ type multiError struct {
errors []error
}
-var _ errorGroup = (*multiError)(nil)
-
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
@@ -239,33 +220,6 @@ func (merr *multiError) Errors() []error {
return merr.errors
}
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
-
func (merr *multiError) Error() string {
if merr == nil {
return ""
@@ -281,6 +235,17 @@ func (merr *multiError) Error() string {
return result
}
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 00000000..a173f9c2
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // check if the given err is an Unwrapable error that
+ // implements multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
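With the `Unwrap() []error` method added above, errors built by multierr and by the standard library's `errors.Join` become interchangeable on Go 1.20+. A short sketch, assuming Go 1.20 or newer:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	errA := errors.New("a")
	errB := errors.New("b")

	// The standard library can unwrap a multierr error...
	combined := multierr.Combine(errA, errB)
	fmt.Println(errors.Is(combined, errB)) // true

	// ...and multierr.Errors can flatten an errors.Join error.
	joined := errors.Join(errA, errB)
	fmt.Println(len(multierr.Errors(joined))) // 2
}
```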
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 00000000..93872a3f
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084ec..00000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 00000000..cff0cd49
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+// Unless otherwise specified, these functions all apply to the elements
+// of a slice at index 0 <= i < len(s).
+//
+// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
+// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
+// or the sorting may fail to sort correctly. A common case is when sorting slices of
+// floating-point numbers containing NaN values.
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[E comparable](s1, s2 []E) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using a comparison
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
+
+// Compare compares the elements of s1 and s2.
+// The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+// Comparisons involving floating point NaNs are ignored.
+func Compare[E constraints.Ordered](s1, s2 []E) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ switch {
+ case v1 < v2:
+ return -1
+ case v1 > v2:
+ return +1
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like Compare but uses a comparison function
+// on each pair of elements. The elements are compared in increasing
+// index order, and the comparisons stop after the first time cmp
+// returns non-zero.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
+ s2len := len(s2)
+ for i, v1 := range s1 {
+ if i >= s2len {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < s2len {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[E comparable](s []E, v E) int {
+ for i, vs := range s {
+ if v == vs {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[E any](s []E, f func(E) bool) int {
+ for i, v := range s {
+ if f(v) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[E comparable](s []E, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[E any](s []E, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// In the returned slice r, r[i] == v[0].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ tot := len(s) + len(v)
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[i:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[i:])
+ return s2
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if s[i:j] is not a valid slice of s.
+// Delete modifies the contents of the slice s; it does not create a new slice.
+// Delete is O(len(s)-j), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
+// elements contain pointers you might consider zeroing those elements so that
+// objects they reference can be garbage collected.
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j] // bounds check
+
+ return append(s[:i], s[j:]...)
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot <= cap(s) {
+ s2 := s[:tot]
+ copy(s2[i+len(v):], s[j:])
+ copy(s2[i:], v)
+ return s2
+ }
+ s2 := make(S, tot)
+ copy(s2, s[:i])
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+}
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s; it does not create a new slice.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if v != last {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// CompactFunc is like Compact but uses a comparison function.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ last := s[0]
+ for _, v := range s[1:] {
+ if !eq(v, last) {
+ s[i] = v
+ i++
+ last = v
+ }
+ }
+ return s[:i]
+}
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): Make using []E instead of S
+ // to workaround a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
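The new `slices` package above is a set of small, orthogonal helpers; a quick usage sketch of the mutating ones, which (like `append`) return the slice to keep:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{3, 1, 2, 2, 5}

	s = slices.Insert(s, 1, 4) // [3 4 1 2 2 5]
	s = slices.Delete(s, 0, 1) // [4 1 2 2 5]

	slices.Sort(s)        // [1 2 2 4 5]
	s = slices.Compact(s) // adjacent duplicates collapse: [1 2 4 5]

	fmt.Println(s, slices.Contains(s, 4), slices.Index(s, 5)) // [1 2 4 5] true 3
}
```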
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 00000000..f14f40da
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,126 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// Sort may fail to sort correctly when sorting slices of floating-point
+// numbers containing Not-a-number (NaN) values.
+// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
+// instead if the input may contain NaNs.
+func Sort[E constraints.Ordered](x []E) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the less function.
+// This sort is not guaranteed to be stable.
+//
+// SortFunc requires that less is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+func SortFunc[E any](x []E, less func(a, b E) bool) {
+ n := len(x)
+ pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using less to compare elements.
+func SortStableFunc[E any](x []E, less func(a, b E) bool) {
+ stableLessFunc(x, len(x), less)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[E constraints.Ordered](x []E) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if x[i] < x[i-1] {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with less as the
+// comparison function.
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if less(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if x[h] < target {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && x[i] == target
+}
+
+// BinarySearchFunc works like BinarySearch, but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing" is
+// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
+// parameters: 0 if a == b, a negative number if a < b and a positive number if
+// a > b.
+func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
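A brief sketch of the sorting and searching entry points defined above; note that this vendored version still takes a `less func(a, b E) bool` rather than a three-way comparator:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type task struct {
	name     string
	priority int
}

func main() {
	ages := []int{30, 10, 20}
	slices.Sort(ages)
	i, found := slices.BinarySearch(ages, 20)
	fmt.Println(ages, i, found) // [10 20 30] 1 true

	byPriority := func(a, b task) bool { return a.priority < b.priority }
	tasks := []task{{"deploy", 2}, {"build", 1}}
	slices.SortFunc(tasks, byPriority)
	fmt.Println(tasks[0].name, slices.IsSortedFunc(tasks, byPriority)) // build true
}
```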
diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go
new file mode 100644
index 00000000..2a632476
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortLessFunc sorts data[a:b] using insertion sort.
+func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && less(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownLessFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && less(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !less(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownLessFunc(data, i, hi, first, less)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownLessFunc(data, lo, i, first, less)
+ }
+}
+
+// pdqsortLessFunc sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortLessFunc(data, a, b, less)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to breaking patterns.
+ if !wasBalanced {
+ breakPatternsLessFunc(data, a, b, less)
+ limit--
+ }
+
+ pivot, hint := choosePivotLessFunc(data, a, b, less)
+ if hint == decreasingHint {
+ reverseRangeLessFunc(data, a, b, less)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortLessFunc(data, a, b, less) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !less(data[a-1], data[pivot]) {
+ mid := partitionEqualLessFunc(data, a, b, pivot, less)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortLessFunc(data, a, mid, limit, less)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortLessFunc(data, mid+1, b, limit, less)
+ b = mid
+ }
+ }
+}
+
+// partitionLessFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && less(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !less(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !less(data[a], data[i]) {
+ i++
+ }
+ for i <= j && less(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !less(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !less(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentLessFunc(data, i, &swaps, less)
+ j = medianAdjacentLessFunc(data, j, &swaps, less)
+ k = medianAdjacentLessFunc(data, k, &swaps, less)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianLessFunc(data, i, j, k, &swaps, less)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
+ if less(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ b, c = order2LessFunc(data, b, c, swaps, less)
+ a, b = order2LessFunc(data, a, b, swaps, less)
+ return b
+}
+
+// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
+ return medianLessFunc(data, a-1, a, a+1, swaps, less)
+}
+
+func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortLessFunc(data, a, b, less)
+ a = b
+ b += blockSize
+ }
+ insertionSortLessFunc(data, a, n, less)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeLessFunc(data, a, a+blockSize, b, less)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeLessFunc(data, a, m, n, less)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if less(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !less(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !less(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateLessFunc(data, start, m, end, less)
+ }
+ if a < start && start < mid {
+ symMergeLessFunc(data, a, start, mid, less)
+ }
+ if mid < end && end < b {
+ symMergeLessFunc(data, mid, end, b, less)
+ }
+}
+
+// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeLessFunc(data, m-i, m, j, less)
+ i -= j
+ } else {
+ swapRangeLessFunc(data, m-i, m+j-i, i, less)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeLessFunc(data, m-i, m, i, less)
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 00000000..efaa1c8b
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j] < data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (data[first+child] < data[first+child+1]) {
+ child++
+ }
+ if !(data[first+root] < data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to breaking patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // Probably the slice contains many duplicate elements, partition the slice into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(data[a-1] < data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and stores it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if data[b] < data[a] {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-n. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if data[h] < data[a] {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(data[m] < data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(data[p-c] < data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
deleted file mode 100644
index 37dc0cfd..00000000
--- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
- "context"
- "io"
- "net/http"
- "net/url"
- "strings"
-)
-
-// Do sends an HTTP request with the provided http.Client and returns
-// an HTTP response.
-//
-// If the client is nil, http.DefaultClient is used.
-//
-// The provided ctx must be non-nil. If it is canceled or times out,
-// ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
- resp, err := client.Do(req.WithContext(ctx))
- // If we got an error, and the context has been canceled,
- // the context's error is probably more useful.
- if err != nil {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- default:
- }
- }
- return resp, err
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
index 355c3869..b4723fca 100644
--- a/vendor/golang.org/x/oauth2/internal/token.go
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -19,8 +19,6 @@ import (
"strings"
"sync"
"time"
-
- "golang.org/x/net/context/ctxhttp"
)
// Token represents the credentials used to authorize
@@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
}
func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
- r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
+ r, err := ContextClient(ctx).Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
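The change above drops the `ctxhttp` dependency by issuing the request directly with `req.WithContext(ctx)`. For reference, the removed helper also preferred the context's error when a request failed after cancellation; a minimal sketch of that pattern using only the standard library (the `do` helper name is hypothetical):

```go
package tokenreq

import (
	"context"
	"net/http"
)

// do sends req with ctx attached and, if the request failed because ctx was
// cancelled or timed out, returns ctx.Err() instead of the transport error.
func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(req.WithContext(ctx))
	if err != nil {
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
		}
	}
	return resp, err
}
```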
diff --git a/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vendor/golang.org/x/tools/cmd/goimports/doc.go
new file mode 100644
index 00000000..18a3ad44
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/doc.go
@@ -0,0 +1,50 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Command goimports updates your Go import lines,
+adding missing ones and removing unreferenced ones.
+
+ $ go install golang.org/x/tools/cmd/goimports@latest
+
+In addition to fixing imports, goimports also formats
+your code in the same style as gofmt so it can be used
+as a replacement for your editor's gofmt-on-save hook.
+
+For emacs, make sure you have the latest go-mode.el:
+
+ https://github.com/dominikh/go-mode.el
+
+Then in your .emacs file:
+
+ (setq gofmt-command "goimports")
+ (add-hook 'before-save-hook 'gofmt-before-save)
+
+For vim, set "gofmt_command" to "goimports":
+
+ https://golang.org/change/39c724dd7f252
+ https://golang.org/wiki/IDEsAndTextEditorPlugins
+ etc
+
+For GoSublime, follow the steps described here:
+
+ http://michaelwhatcott.com/gosublime-goimports/
+
+For other editors, you probably know what to do.
+
+To exclude directories in your $GOPATH from being scanned for Go
+files, goimports respects a configuration file at
+$GOPATH/src/.goimportsignore which may contain blank lines, comment
+lines (beginning with '#'), or lines naming a directory relative to
+the configuration file to ignore when scanning. No globbing or regex
+patterns are allowed. Use the "-v" verbose flag to verify it's
+working and see what goimports is doing.
+
+File bugs or feature requests at:
+
+ https://golang.org/issues/new?title=x/tools/cmd/goimports:+
+
+Happy hacking!
+*/
+package main // import "golang.org/x/tools/cmd/goimports"
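The doc above describes the `$GOPATH/src/.goimportsignore` file; a small example of what such a file might contain (the directory names are hypothetical):

```
# $GOPATH/src/.goimportsignore
# Blank lines and '#' comment lines are allowed; each entry names a
# directory relative to this file. No globs or regexes.

github.com/example/huge-vendored-tree
internal/generated
```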
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
new file mode 100644
index 00000000..b354c9e8
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go
@@ -0,0 +1,380 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/scanner"
+ exec "golang.org/x/sys/execabs"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "runtime"
+ "runtime/pprof"
+ "strings"
+
+ "golang.org/x/tools/internal/gocommand"
+ "golang.org/x/tools/internal/imports"
+)
+
+var (
+ // main operation modes
+ list = flag.Bool("l", false, "list files whose formatting differs from goimport's")
+ write = flag.Bool("w", false, "write result to (source) file instead of stdout")
+ doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
+ srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. When operating on a single file, dir may instead be the complete file name.")
+
+ verbose bool // verbose logging
+
+ cpuProfile = flag.String("cpuprofile", "", "CPU profile output")
+ memProfile = flag.String("memprofile", "", "memory profile output")
+ memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate")
+
+ options = &imports.Options{
+ TabWidth: 8,
+ TabIndent: true,
+ Comments: true,
+ Fragment: true,
+ Env: &imports.ProcessEnv{
+ GocmdRunner: &gocommand.Runner{},
+ },
+ }
+ exitCode = 0
+)
+
+func init() {
+ flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
+ flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list")
+ flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.")
+}
+
+func report(err error) {
+ scanner.PrintError(os.Stderr, err)
+ exitCode = 2
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func isGoFile(f os.FileInfo) bool {
+ // ignore non-Go files
+ name := f.Name()
+ return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
+}
+
+// argumentType is which mode goimports was invoked as.
+type argumentType int
+
+const (
+ // fromStdin means the user is piping their source into goimports.
+ fromStdin argumentType = iota
+
+ // singleArg is the common case from editors, when goimports is run on
+ // a single file.
+ singleArg
+
+ // multipleArg is when the user ran "goimports file1.go file2.go"
+ // or ran goimports on a directory tree.
+ multipleArg
+)
+
+func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error {
+ opt := options
+ if argType == fromStdin {
+ nopt := *options
+ nopt.Fragment = true
+ opt = &nopt
+ }
+
+ if in == nil {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ in = f
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ target := filename
+ if *srcdir != "" {
+ // Determine whether the provided -srcdirc is a directory or file
+ // and then use it to override the target.
+ //
+ // See https://github.com/dominikh/go-mode.el/issues/146
+ if isFile(*srcdir) {
+ if argType == multipleArg {
+ return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories")
+ }
+ target = *srcdir
+ } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) {
+ // For a file which doesn't exist on disk yet, but might shortly.
+ // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk.
+ // The goimports on-save hook writes the buffer to a temp file
+ // first and runs goimports before the actual save to newfile.go.
+ // The editor's buffer is named "newfile.go" so that is passed to goimports as:
+ // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go
+ // and then the editor reloads the result from the tmp file and writes
+ // it to newfile.go.
+ target = *srcdir
+ } else {
+ // Pretend that file is from *srcdir in order to decide
+ // visible imports correctly.
+ target = filepath.Join(*srcdir, filepath.Base(filename))
+ }
+ }
+
+ res, err := imports.Process(target, src, opt)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(src, res) {
+ // formatting has changed
+ if *list {
+ fmt.Fprintln(out, filename)
+ }
+ if *write {
+ if argType == fromStdin {
+ // filename is ""
+ return errors.New("can't use -w on stdin")
+ }
+ // On Windows, we need to re-set the permissions from the file. See golang/go#38225.
+ var perms os.FileMode
+ if fi, err := os.Stat(filename); err == nil {
+ perms = fi.Mode() & os.ModePerm
+ }
+ err = ioutil.WriteFile(filename, res, perms)
+ if err != nil {
+ return err
+ }
+ }
+ if *doDiff {
+ if argType == fromStdin {
+ filename = "stdin.go" // because .orig looks silly
+ }
+ data, err := diff(src, res, filename)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename))
+ out.Write(data)
+ }
+ }
+
+ if !*list && !*write && !*doDiff {
+ _, err = out.Write(res)
+ }
+
+ return err
+}
+
+func visitFile(path string, f os.FileInfo, err error) error {
+ if err == nil && isGoFile(f) {
+ err = processFile(path, nil, os.Stdout, multipleArg)
+ }
+ if err != nil {
+ report(err)
+ }
+ return nil
+}
+
+func walkDir(path string) {
+ filepath.Walk(path, visitFile)
+}
+
+func main() {
+ runtime.GOMAXPROCS(runtime.NumCPU())
+
+ // call gofmtMain in a separate function
+ // so that it can use deferred calls and have them
+ // run before the exit.
+ gofmtMain()
+ os.Exit(exitCode)
+}
+
+// parseFlags parses command line flags and returns the paths to process.
+// It's a var so that custom implementations can replace it in other files.
+var parseFlags = func() []string {
+ flag.BoolVar(&verbose, "v", false, "verbose logging")
+
+ flag.Parse()
+ return flag.Args()
+}
+
+func bufferedFileWriter(dest string) (w io.Writer, close func()) {
+ f, err := os.Create(dest)
+ if err != nil {
+ log.Fatal(err)
+ }
+ bw := bufio.NewWriter(f)
+ return bw, func() {
+ if err := bw.Flush(); err != nil {
+ log.Fatalf("error flushing %v: %v", dest, err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func gofmtMain() {
+ flag.Usage = usage
+ paths := parseFlags()
+
+ if *cpuProfile != "" {
+ bw, flush := bufferedFileWriter(*cpuProfile)
+ pprof.StartCPUProfile(bw)
+ defer flush()
+ defer pprof.StopCPUProfile()
+ }
+ // doTrace is a conditionally compiled wrapper around runtime/trace. It is
+ // used to allow goimports to compile under gccgo, which does not support
+ // runtime/trace. See https://golang.org/issue/15544.
+ defer doTrace()()
+ if *memProfileRate > 0 {
+ runtime.MemProfileRate = *memProfileRate
+ bw, flush := bufferedFileWriter(*memProfile)
+ defer func() {
+ runtime.GC() // materialize all statistics
+ if err := pprof.WriteHeapProfile(bw); err != nil {
+ log.Fatal(err)
+ }
+ flush()
+ }()
+ }
+
+ if verbose {
+ log.SetFlags(log.LstdFlags | log.Lmicroseconds)
+ options.Env.Logf = log.Printf
+ }
+ if options.TabWidth < 0 {
+ fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
+ exitCode = 2
+ return
+ }
+
+ if len(paths) == 0 {
+ if err := processFile("", os.Stdin, os.Stdout, fromStdin); err != nil {
+ report(err)
+ }
+ return
+ }
+
+ argType := singleArg
+ if len(paths) > 1 {
+ argType = multipleArg
+ }
+
+ for _, path := range paths {
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ report(err)
+ case dir.IsDir():
+ walkDir(path)
+ default:
+ if err := processFile(path, nil, os.Stdout, argType); err != nil {
+ report(err)
+ }
+ }
+ }
+}
+
+func writeTempFile(dir, prefix string, data []byte) (string, error) {
+ file, err := ioutil.TempFile(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ _, err = file.Write(data)
+ if err1 := file.Close(); err == nil {
+ err = err1
+ }
+ if err != nil {
+ os.Remove(file.Name())
+ return "", err
+ }
+ return file.Name(), nil
+}
+
+func diff(b1, b2 []byte, filename string) (data []byte, err error) {
+ f1, err := writeTempFile("", "gofmt", b1)
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1)
+
+ f2, err := writeTempFile("", "gofmt", b2)
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2)
+
+ cmd := "diff"
+ if runtime.GOOS == "plan9" {
+ cmd = "/bin/ape/diff"
+ }
+
+ data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ return replaceTempFilename(data, filename)
+ }
+ return
+}
+
+// replaceTempFilename replaces temporary filenames in diff with the actual one.
+//
+// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500
+// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500
+// ...
+// ->
+// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500
+// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500
+// ...
+func replaceTempFilename(diff []byte, filename string) ([]byte, error) {
+ bs := bytes.SplitN(diff, []byte{'\n'}, 3)
+ if len(bs) < 3 {
+ return nil, fmt.Errorf("got unexpected diff for %s", filename)
+ }
+ // Preserve timestamps.
+ var t0, t1 []byte
+ if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 {
+ t0 = bs[0][i:]
+ }
+ if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 {
+ t1 = bs[1][i:]
+ }
+ // Always print filepath with slash separator.
+ f := filepath.ToSlash(filename)
+ bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0))
+ bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1))
+ return bytes.Join(bs, []byte{'\n'}), nil
+}
+
+// isFile reports whether name is a file.
+func isFile(name string) bool {
+ fi, err := os.Stat(name)
+ return err == nil && fi.Mode().IsRegular()
+}
+
+// isDir reports whether name is a directory.
+func isDir(name string) bool {
+ fi, err := os.Stat(name)
+ return err == nil && fi.IsDir()
+}
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go
new file mode 100644
index 00000000..190a5653
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc
+// +build gc
+
+package main
+
+import (
+ "flag"
+ "runtime/trace"
+)
+
+var traceProfile = flag.String("trace", "", "trace profile output")
+
+func doTrace() func() {
+ if *traceProfile != "" {
+ bw, flush := bufferedFileWriter(*traceProfile)
+ trace.Start(bw)
+ return func() {
+ flush()
+ trace.Stop()
+ }
+ }
+ return func() {}
+}
diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go
new file mode 100644
index 00000000..344fe757
--- /dev/null
+++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc
+// +build !gc
+
+package main
+
+func doTrace() func() {
+ return func() {}
+}
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
new file mode 100644
index 00000000..be8f5a86
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
@@ -0,0 +1,762 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package objectpath defines a naming scheme for types.Objects
+// (that is, named entities in Go programs) relative to their enclosing
+// package.
+//
+// Type-checker objects are canonical, so they are usually identified by
+// their address in memory (a pointer), but a pointer has meaning only
+// within one address space. By contrast, objectpath names allow the
+// identity of an object to be sent from one program to another,
+// establishing a correspondence between types.Object variables that are
+// distinct but logically equivalent.
+//
+// A single object may have multiple paths. In this example,
+//
+// type A struct{ X int }
+// type B A
+//
+// the field X has two paths due to its membership of both A and B.
+// The For(obj) function always returns one of these paths, arbitrarily
+// but consistently.
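+//
+// A minimal round-trip sketch (hedged; pkg and obj are assumed to come from a
+// prior go/types type check or import, not from this package):
+//
+//	path, err := objectpath.For(obj)
+//	if err == nil {
+//		obj2, _ := objectpath.Object(pkg, path) // possibly in another process
+//		_ = obj2                                // logically equivalent to obj
+//	}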
+package objectpath
+
+import (
+ "fmt"
+ "go/types"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/internal/typeparams"
+
+ _ "unsafe" // for go:linkname
+)
+
+// A Path is an opaque name that identifies a types.Object
+// relative to its package. Conceptually, the name consists of a
+// sequence of destructuring operations applied to the package scope
+// to obtain the original object.
+// The name does not include the package itself.
+type Path string
+
+// Encoding
+//
+// An object path is a textual and (with training) human-readable encoding
+// of a sequence of destructuring operators, starting from a types.Package.
+// The sequences represent a path through the package/object/type graph.
+// We classify these operators by their type:
+//
+// PO package->object Package.Scope.Lookup
+// OT object->type Object.Type
+// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
+// TO type->object Type.{At,Field,Method,Obj} [AFMO]
+//
+// All valid paths start with a package and end at an object
+// and thus may be defined by the regular language:
+//
+// objectpath = PO (OT TT* TO)*
+//
+// The concrete encoding follows directly:
+// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
+// - The only OT operator is Object.Type,
+// which we encode as '.' because dot cannot appear in an identifier.
+// - The TT operators are encoded as [EKPRUTC];
+// one of these (TypeParam) requires an integer operand,
+// which is encoded as a string of decimal digits.
+// - The TO operators are encoded as [AFMO];
+// three of these (At,Field,Method) require an integer operand,
+// which is encoded as a string of decimal digits.
+// These indices are stable across different representations
+// of the same package, even source and export data.
+// The indices used are implementation specific and may not correspond to
+// the argument to the go/types function.
+//
+// In the example below,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// field X has the path "T.UM0.RA1.F0",
+// representing the following sequence of operations:
+//
+// p.Lookup("T") T
+// .Type().Underlying().Method(0). f
+// .Type().Results().At(1) b
+// .Type().Field(0) X
+//
+// The encoding is not maximally compact---every R or P is
+// followed by an A, for example---but this simplifies the
+// encoder and decoder.
+const (
+ // object->type operators
+ opType = '.' // .Type() (Object)
+
+ // type->type operators
+ opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
+ opKey = 'K' // .Key() (Map)
+ opParams = 'P' // .Params() (Signature)
+ opResults = 'R' // .Results() (Signature)
+ opUnderlying = 'U' // .Underlying() (Named)
+ opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
+ opConstraint = 'C' // .Constraint() (TypeParam)
+
+ // type->object operators
+ opAt = 'A' // .At(i) (Tuple)
+ opField = 'F' // .Field(i) (Struct)
+ opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
+ opObj = 'O' // .Obj() (Named, TypeParam)
+)
+
+// For returns the path to an object relative to its package,
+// or an error if the object is not accessible from the package's Scope.
+//
+// The For function guarantees to return a path only for the following objects:
+// - package-level types
+// - exported package-level non-types
+// - methods
+// - parameter and result variables
+// - struct fields
+// These objects are sufficient to define the API of their package.
+// The objects described by a package's export data are drawn from this set.
+//
+// For does not return a path for predeclared names, imported package
+// names, local names, and unexported package-level names (except
+// types).
+//
+// Example: given this definition,
+//
+// package p
+//
+// type T interface {
+// f() (a string, b struct{ X int })
+// }
+//
+// For(X) would return a path that denotes the following sequence of operations:
+//
+// p.Scope().Lookup("T") (TypeName T)
+// .Type().Underlying().Method(0). (method Func f)
+// .Type().Results().At(1) (field Var b)
+// .Type().Field(0) (field Var X)
+//
+// where p is the package (*types.Package) to which X belongs.
+func For(obj types.Object) (Path, error) {
+ return newEncoderFor()(obj)
+}
+
+// An encoder amortizes the cost of encoding the paths of multiple objects.
+// Not exported, pending approval of proposal 58668.
+type encoder struct {
+ scopeNamesMemo map[*types.Scope][]string // memoization of Scope.Names()
+ namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods()
+}
+
+// Exposed to gopls via golang.org/x/tools/internal/typesinternal
+// pending approval of proposal 58668.
+//
+//go:linkname newEncoderFor
+func newEncoderFor() func(types.Object) (Path, error) { return new(encoder).For }
+
+func (enc *encoder) For(obj types.Object) (Path, error) {
+ pkg := obj.Pkg()
+
+ // This table lists the cases of interest.
+ //
+ // Object Action
+ // ------ ------
+ // nil reject
+ // builtin reject
+ // pkgname reject
+ // label reject
+ // var
+ // package-level accept
+ // func param/result accept
+ // local reject
+ // struct field accept
+ // const
+ // package-level accept
+ // local reject
+ // func
+ // package-level accept
+ // init functions reject
+ // concrete method accept
+ // interface method accept
+ // type
+ // package-level accept
+ // local reject
+ //
+ // The only accessible package-level objects are members of pkg itself.
+ //
+ // The cases are handled in four steps:
+ //
+ // 1. reject nil and builtin
+ // 2. accept package-level objects
+ // 3. reject obviously invalid objects
+ // 4. search the API for the path to the param/result/field/method.
+
+ // 1. reference to nil or builtin?
+ if pkg == nil {
+ return "", fmt.Errorf("predeclared %s has no path", obj)
+ }
+ scope := pkg.Scope()
+
+ // 2. package-level object?
+ if scope.Lookup(obj.Name()) == obj {
+ // Only exported objects (and non-exported types) have a path.
+ // Non-exported types may be referenced by other objects.
+ if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
+ return "", fmt.Errorf("no path for non-exported %v", obj)
+ }
+ return Path(obj.Name()), nil
+ }
+
+ // 3. Not a package-level object.
+ // Reject obviously non-viable cases.
+ switch obj := obj.(type) {
+ case *types.TypeName:
+ if _, ok := obj.Type().(*typeparams.TypeParam); !ok {
+ // With the exception of type parameters, only package-level type names
+ // have a path.
+ return "", fmt.Errorf("no path for %v", obj)
+ }
+ case *types.Const, // Only package-level constants have a path.
+ *types.Label, // Labels are function-local.
+ *types.PkgName: // PkgNames are file-local.
+ return "", fmt.Errorf("no path for %v", obj)
+
+ case *types.Var:
+ // Could be:
+ // - a field (obj.IsField())
+ // - a func parameter or result
+ // - a local var.
+ // Sadly there is no way to distinguish
+ // a param/result from a local
+ // so we must proceed to the find.
+
+ case *types.Func:
+ // A func, if not package-level, must be a method.
+ if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
+ return "", fmt.Errorf("func is not a method: %v", obj)
+ }
+
+ if path, ok := enc.concreteMethod(obj); ok {
+ // Fast path for concrete methods that avoids looping over scope.
+ return path, nil
+ }
+
+ default:
+ panic(obj)
+ }
+
+ // 4. Search the API for the path to the var (field/param/result) or method.
+
+ // First inspect package-level named types.
+ // In the presence of path aliases, these give
+ // the best paths because non-types may
+ // refer to types, but not the reverse.
+ empty := make([]byte, 0, 48) // initial space
+ names := enc.scopeNames(scope)
+ for _, name := range names {
+ o := scope.Lookup(name)
+ tname, ok := o.(*types.TypeName)
+ if !ok {
+ continue // handle non-types in second pass
+ }
+
+ path := append(empty, name...)
+ path = append(path, opType)
+
+ T := o.Type()
+
+ if tname.IsAlias() {
+ // type alias
+ if r := find(obj, T, path, nil); r != nil {
+ return Path(r), nil
+ }
+ } else {
+ if named, _ := T.(*types.Named); named != nil {
+ if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil {
+ // generic named type
+ return Path(r), nil
+ }
+ }
+ // defined (named) type
+ if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+
+ // Then inspect everything else:
+ // non-types, and declared methods of defined types.
+ for _, name := range names {
+ o := scope.Lookup(name)
+ path := append(empty, name...)
+ if _, ok := o.(*types.TypeName); !ok {
+ if o.Exported() {
+ // exported non-type (const, var, func)
+ if r := find(obj, o.Type(), append(path, opType), nil); r != nil {
+ return Path(r), nil
+ }
+ }
+ continue
+ }
+
+ // Inspect declared methods of defined types.
+ if T, ok := o.Type().(*types.Named); ok {
+ path = append(path, opType)
+ // Note that method index here is always with respect
+ // to canonical ordering of methods, regardless of how
+ // they appear in the underlying type.
+ for i, m := range enc.namedMethods(T) {
+ path2 := appendOpArg(path, opMethod, i)
+ if m == obj {
+ return Path(path2), nil // found declared method
+ }
+ if r := find(obj, m.Type(), append(path2, opType), nil); r != nil {
+ return Path(r), nil
+ }
+ }
+ }
+ }
+
+ return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
+}
+
+func appendOpArg(path []byte, op byte, arg int) []byte {
+ path = append(path, op)
+ path = strconv.AppendInt(path, int64(arg), 10)
+ return path
+}
+
+// concreteMethod returns the path for meth, which must have a non-nil receiver.
+// The second return value indicates success and may be false if the method is
+// an interface method or if it is an instantiated method.
+//
+// This function is just an optimization that avoids the general scope walking
+// approach. You are expected to fall back to the general approach if this
+// function fails.
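+//
+// A minimal fallback sketch (hedged; it mirrors the call site in For above):
+//
+//	if path, ok := enc.concreteMethod(obj); ok {
+//		return path, nil // fast path
+//	}
+//	// otherwise continue with the general scope walk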
+func (enc *encoder) concreteMethod(meth *types.Func) (Path, bool) {
+ // Concrete methods can only be declared on package-scoped named types. For
+ // that reason we can skip the expensive walk over the package scope: the
+ // path will always be package -> named type -> method. We can trivially get
+ // the type name from the receiver, and only have to look over the type's
+ // methods to find the method index.
+ //
+ // Methods on generic types require special consideration, however. Consider
+ // the following package:
+ //
+ // L1: type S[T any] struct{}
+ // L2: func (recv S[A]) Foo() { recv.Bar() }
+ // L3: func (recv S[B]) Bar() { }
+ // L4: type Alias = S[int]
+ // L5: func _[T any]() { var s S[int]; s.Foo() }
+ //
+ // The receivers of methods on generic types are instantiations. L2 and L3
+ // instantiate S with the type-parameters A and B, which are scoped to the
+ // respective methods. L4 and L5 each instantiate S with int. Each of these
+ // instantiations has its own method set, full of methods (and thus objects)
+ // with receivers whose types are the respective instantiations. In other
+ // words, we have
+ //
+ // S[A].Foo, S[A].Bar
+ // S[B].Foo, S[B].Bar
+ // S[int].Foo, S[int].Bar
+ //
+ // We may thus be trying to produce object paths for any of these objects.
+ //
+ // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
+ // and S.Bar, which are the paths that this function naturally produces.
+ //
+ // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
+ // don't correspond to the origin methods. For S[int], this is significant.
+ // The most precise object path for S[int].Foo, for example, is Alias.Foo,
+ // not S.Foo. Our function, however, would produce S.Foo, which would
+ // resolve to a different object.
+ //
+ // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
+ // still the correct paths, since only the origin methods have meaningful
+ // paths. But this is likely only true for trivial cases and has edge cases.
+ // Since this function is only an optimization, we err on the side of giving
+ // up, deferring to the slower but definitely correct algorithm. Most users
+ // of objectpath will only be giving us origin methods, anyway, as referring
+ // to instantiated methods is usually not useful.
+
+ if typeparams.OriginMethod(meth) != meth {
+ return "", false
+ }
+
+ recvT := meth.Type().(*types.Signature).Recv().Type()
+ if ptr, ok := recvT.(*types.Pointer); ok {
+ recvT = ptr.Elem()
+ }
+
+ named, ok := recvT.(*types.Named)
+ if !ok {
+ return "", false
+ }
+
+ if types.IsInterface(named) {
+ // Named interfaces don't have to be package-scoped
+ //
+ // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
+ // methods, too, I think.
+ return "", false
+ }
+
+ // Preallocate space for the name, opType, opMethod, and some digits.
+ name := named.Obj().Name()
+ path := make([]byte, 0, len(name)+8)
+ path = append(path, name...)
+ path = append(path, opType)
+ for i, m := range enc.namedMethods(named) {
+ if m == meth {
+ path = appendOpArg(path, opMethod, i)
+ return Path(path), true
+ }
+ }
+
+ panic(fmt.Sprintf("couldn't find method %s on type %s", meth, named))
+}
+
+// find finds obj within type T, returning the path to it, or nil if not found.
+//
+// The seen map is used to short circuit cycles through type parameters. If
+// nil, it will be allocated as necessary.
+func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte {
+ switch T := T.(type) {
+ case *types.Basic, *types.Named:
+ // Named types belonging to pkg were handled already,
+ // so T must belong to another package. No path.
+ return nil
+ case *types.Pointer:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Slice:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Array:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Chan:
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Map:
+ if r := find(obj, T.Key(), append(path, opKey), seen); r != nil {
+ return r
+ }
+ return find(obj, T.Elem(), append(path, opElem), seen)
+ case *types.Signature:
+ if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil {
+ return r
+ }
+ if r := find(obj, T.Params(), append(path, opParams), seen); r != nil {
+ return r
+ }
+ return find(obj, T.Results(), append(path, opResults), seen)
+ case *types.Struct:
+ for i := 0; i < T.NumFields(); i++ {
+ fld := T.Field(i)
+ path2 := appendOpArg(path, opField, i)
+ if fld == obj {
+ return path2 // found field var
+ }
+ if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Tuple:
+ for i := 0; i < T.Len(); i++ {
+ v := T.At(i)
+ path2 := appendOpArg(path, opAt, i)
+ if v == obj {
+ return path2 // found param/result var
+ }
+ if r := find(obj, v.Type(), append(path2, opType), seen); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *types.Interface:
+ for i := 0; i < T.NumMethods(); i++ {
+ m := T.Method(i)
+ path2 := appendOpArg(path, opMethod, i)
+ if m == obj {
+ return path2 // found interface method
+ }
+ if r := find(obj, m.Type(), append(path2, opType), seen); r != nil {
+ return r
+ }
+ }
+ return nil
+ case *typeparams.TypeParam:
+ name := T.Obj()
+ if name == obj {
+ return append(path, opObj)
+ }
+ if seen[name] {
+ return nil
+ }
+ if seen == nil {
+ seen = make(map[*types.TypeName]bool)
+ }
+ seen[name] = true
+ if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil {
+ return r
+ }
+ return nil
+ }
+ panic(T)
+}
+
+func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte {
+ for i := 0; i < list.Len(); i++ {
+ tparam := list.At(i)
+ path2 := appendOpArg(path, opTypeParam, i)
+ if r := find(obj, tparam, path2, seen); r != nil {
+ return r
+ }
+ }
+ return nil
+}
+
+// Object returns the object denoted by path p within the package pkg.
+func Object(pkg *types.Package, p Path) (types.Object, error) {
+ if p == "" {
+ return nil, fmt.Errorf("empty path")
+ }
+
+ pathstr := string(p)
+ var pkgobj, suffix string
+ if dot := strings.IndexByte(pathstr, opType); dot < 0 {
+ pkgobj = pathstr
+ } else {
+ pkgobj = pathstr[:dot]
+ suffix = pathstr[dot:] // suffix starts with "."
+ }
+
+ obj := pkg.Scope().Lookup(pkgobj)
+ if obj == nil {
+ return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
+ }
+
+ // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
+ type hasElem interface {
+ Elem() types.Type
+ }
+ // abstraction of *types.{Named,Signature}
+ type hasTypeParams interface {
+ TypeParams() *typeparams.TypeParamList
+ }
+ // abstraction of *types.{Named,TypeParam}
+ type hasObj interface {
+ Obj() *types.TypeName
+ }
+
+ // The loop state is the pair (t, obj),
+ // exactly one of which is non-nil, initially obj.
+ // All suffixes start with '.' (the only object->type operation),
+ // followed by optional type->type operations,
+ // then a type->object operation.
+ // The cycle then repeats.
+ var t types.Type
+ for suffix != "" {
+ code := suffix[0]
+ suffix = suffix[1:]
+
+ // Codes [AFMT] have an integer operand.
+ var index int
+ switch code {
+ case opAt, opField, opMethod, opTypeParam:
+ rest := strings.TrimLeft(suffix, "0123456789")
+ numerals := suffix[:len(suffix)-len(rest)]
+ suffix = rest
+ i, err := strconv.Atoi(numerals)
+ if err != nil {
+ return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
+ }
+ index = int(i)
+ case opObj:
+ // no operand
+ default:
+ // The suffix must end with a type->object operation.
+ if suffix == "" {
+ return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
+ }
+ }
+
+ if code == opType {
+ if t != nil {
+ return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
+ }
+ t = obj.Type()
+ obj = nil
+ continue
+ }
+
+ if t == nil {
+ return nil, fmt.Errorf("invalid path: code %q in object context", code)
+ }
+
+ // Inv: t != nil, obj == nil
+
+ switch code {
+ case opElem:
+ hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
+ }
+ t = hasElem.Elem()
+
+ case opKey:
+ mapType, ok := t.(*types.Map)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
+ }
+ t = mapType.Key()
+
+ case opParams:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Params()
+
+ case opResults:
+ sig, ok := t.(*types.Signature)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+ }
+ t = sig.Results()
+
+ case opUnderlying:
+ named, ok := t.(*types.Named)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
+ }
+ t = named.Underlying()
+
+ case opTypeParam:
+ hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+ }
+ tparams := hasTypeParams.TypeParams()
+ if n := tparams.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ t = tparams.At(index)
+
+ case opConstraint:
+ tparam, ok := t.(*typeparams.TypeParam)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+ }
+ t = tparam.Constraint()
+
+ case opAt:
+ tuple, ok := t.(*types.Tuple)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
+ }
+ if n := tuple.Len(); index >= n {
+ return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+ }
+ obj = tuple.At(index)
+ t = nil
+
+ case opField:
+ structType, ok := t.(*types.Struct)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
+ }
+ if n := structType.NumFields(); index >= n {
+ return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
+ }
+ obj = structType.Field(index)
+ t = nil
+
+ case opMethod:
+ switch t := t.(type) {
+ case *types.Interface:
+ if index >= t.NumMethods() {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+ }
+ obj = t.Method(index) // Id-ordered
+
+ case *types.Named:
+ methods := namedMethods(t) // (unmemoized)
+ if index >= len(methods) {
+ return nil, fmt.Errorf("method index %d out of range [0-%d)", index, len(methods))
+ }
+ obj = methods[index] // Id-ordered
+
+ default:
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
+ }
+ t = nil
+
+ case opObj:
+ hasObj, ok := t.(hasObj)
+ if !ok {
+ return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
+ }
+ obj = hasObj.Obj()
+ t = nil
+
+ default:
+ return nil, fmt.Errorf("invalid path: unknown code %q", code)
+ }
+ }
+
+ if obj.Pkg() != pkg {
+ return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
+ }
+
+ return obj, nil // success
+}
+
+// namedMethods returns the methods of a Named type in ascending Id order.
+func namedMethods(named *types.Named) []*types.Func {
+ methods := make([]*types.Func, named.NumMethods())
+ for i := range methods {
+ methods[i] = named.Method(i)
+ }
+ sort.Slice(methods, func(i, j int) bool {
+ return methods[i].Id() < methods[j].Id()
+ })
+ return methods
+}
+
+// scopeNames is a memoization of scope.Names. Callers must not modify the result.
+func (enc *encoder) scopeNames(scope *types.Scope) []string {
+ m := enc.scopeNamesMemo
+ if m == nil {
+ m = make(map[*types.Scope][]string)
+ enc.scopeNamesMemo = m
+ }
+ names, ok := m[scope]
+ if !ok {
+ names = scope.Names() // allocates and sorts
+ m[scope] = names
+ }
+ return names
+}
+
+// namedMethods is a memoization of the namedMethods function. Callers must not modify the result.
+func (enc *encoder) namedMethods(named *types.Named) []*types.Func {
+ m := enc.namedMethodsMemo
+ if m == nil {
+ m = make(map[*types.Named][]*types.Func)
+ enc.namedMethodsMemo = m
+ }
+ methods, ok := m[named]
+ if !ok {
+ methods = namedMethods(named) // allocates and sorts
+ m[named] = methods
+ }
+ return methods
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index 0372fb3a..a973dece 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -7,6 +7,18 @@
// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package.
+//
+// The encoding is deterministic: if the encoder is applied twice to
+// the same types.Package data structure, both encodings are equal.
+// This property may be important to avoid spurious changes in
+// applications such as build systems.
+//
+// However, the encoder is not necessarily idempotent. Importing an
+// exported package may yield a types.Package that, while it
+// represents the same set of Go types as the original, may differ in
+// the details of its internal representation. Because of these
+// differences, re-encoding the imported package may yield a
+// different, but equally valid, encoding of the package.
package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index b285a11c..34fc783f 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -12,6 +12,7 @@ package gcimporter
import (
"go/token"
"go/types"
+ "sort"
"strings"
"golang.org/x/tools/internal/pkgbits"
@@ -121,6 +122,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st
iface.Complete()
}
+ // Imports() of pkg are all of the transitive packages that were loaded.
+ var imps []*types.Package
+ for _, imp := range pr.pkgs {
+ if imp != nil && imp != pkg {
+ imps = append(imps, imp)
+ }
+ }
+ sort.Sort(byPath(imps))
+ pkg.SetImports(imps)
+
pkg.MarkComplete()
return pkg
}
@@ -260,39 +271,9 @@ func (r *reader) doPkg() *types.Package {
pkg := types.NewPackage(path, name)
r.p.imports[path] = pkg
- imports := make([]*types.Package, r.Len())
- for i := range imports {
- imports[i] = r.pkg()
- }
- pkg.SetImports(flattenImports(imports))
-
return pkg
}
-// flattenImports returns the transitive closure of all imported
-// packages rooted from pkgs.
-func flattenImports(pkgs []*types.Package) []*types.Package {
- var res []*types.Package
- seen := make(map[*types.Package]struct{})
- for _, pkg := range pkgs {
- if _, ok := seen[pkg]; ok {
- continue
- }
- seen[pkg] = struct{}{}
- res = append(res, pkg)
-
- // pkg.Imports() is already flattened.
- for _, pkg := range pkg.Imports() {
- if _, ok := seen[pkg]; ok {
- continue
- }
- seen[pkg] = struct{}{}
- res = append(res, pkg)
- }
- }
- return res
-}
-
// @@@ Types
func (r *reader) typ() types.Type {
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
index 25a1426d..cfba8189 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -87,7 +87,6 @@ func IsTypeParam(t types.Type) bool {
func OriginMethod(fn *types.Func) *types.Func {
recv := fn.Type().(*types.Signature).Recv()
if recv == nil {
-
return fn
}
base := recv.Type()
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index ce7d4351..3c53fbc6 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -11,6 +11,8 @@ import (
"go/types"
"reflect"
"unsafe"
+
+ "golang.org/x/tools/go/types/objectpath"
)
func SetUsesCgo(conf *types.Config) bool {
@@ -50,3 +52,10 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
}
var SetGoVersion = func(conf *types.Config, version string) bool { return false }
+
+// NewObjectpathFunc returns a function closure equivalent to
+// objectpath.For but amortized for multiple (sequential) calls.
+// It is a temporary workaround, pending the approval of proposal 58668.
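+//
+// A minimal usage sketch (hedged; objs is an assumed []types.Object drawn from
+// a single package):
+//
+//	toPath := typesinternal.NewObjectpathFunc() // one encoder, amortized across calls
+//	for _, o := range objs {
+//		if p, err := toPath(o); err == nil {
+//			_ = p // p is an objectpath.Path for o
+//		}
+//	}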
+//
+//go:linkname NewObjectpathFunc golang.org/x/tools/go/types/objectpath.newEncoderFor
+func NewObjectpathFunc() func(types.Object) (objectpath.Path, error)
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go
new file mode 100644
index 00000000..369df13d
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go
@@ -0,0 +1,168 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal_gengo
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/compiler/protogen"
+ "google.golang.org/protobuf/encoding/protowire"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+type fileInfo struct {
+ *protogen.File
+
+ allEnums []*enumInfo
+ allMessages []*messageInfo
+ allExtensions []*extensionInfo
+
+ allEnumsByPtr map[*enumInfo]int // value is index into allEnums
+ allMessagesByPtr map[*messageInfo]int // value is index into allMessages
+ allMessageFieldsByPtr map[*messageInfo]*structFields
+
+ // needRawDesc specifies whether the generator should emit logic to provide
+ // the legacy raw descriptor in GZIP'd form.
+ // This is updated by enum and message generation logic as necessary,
+ // and checked at the end of file generation.
+ needRawDesc bool
+}
+
+type structFields struct {
+ count int
+ unexported map[int]string
+}
+
+func (sf *structFields) append(name string) {
+ if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) {
+ if sf.unexported == nil {
+ sf.unexported = make(map[int]string)
+ }
+ sf.unexported[sf.count] = name
+ }
+ sf.count++
+}
+
+func newFileInfo(file *protogen.File) *fileInfo {
+ f := &fileInfo{File: file}
+
+ // Collect all enums, messages, and extensions in "flattened ordering".
+ // See filetype.TypeBuilder.
+ var walkMessages func([]*protogen.Message, func(*protogen.Message))
+ walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) {
+ for _, m := range messages {
+ f(m)
+ walkMessages(m.Messages, f)
+ }
+ }
+ initEnumInfos := func(enums []*protogen.Enum) {
+ for _, enum := range enums {
+ f.allEnums = append(f.allEnums, newEnumInfo(f, enum))
+ }
+ }
+ initMessageInfos := func(messages []*protogen.Message) {
+ for _, message := range messages {
+ f.allMessages = append(f.allMessages, newMessageInfo(f, message))
+ }
+ }
+ initExtensionInfos := func(extensions []*protogen.Extension) {
+ for _, extension := range extensions {
+ f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension))
+ }
+ }
+ initEnumInfos(f.Enums)
+ initMessageInfos(f.Messages)
+ initExtensionInfos(f.Extensions)
+ walkMessages(f.Messages, func(m *protogen.Message) {
+ initEnumInfos(m.Enums)
+ initMessageInfos(m.Messages)
+ initExtensionInfos(m.Extensions)
+ })
+
+ // Derive a reverse mapping of enum and message pointers to their index
+ // in allEnums and allMessages.
+ if len(f.allEnums) > 0 {
+ f.allEnumsByPtr = make(map[*enumInfo]int)
+ for i, e := range f.allEnums {
+ f.allEnumsByPtr[e] = i
+ }
+ }
+ if len(f.allMessages) > 0 {
+ f.allMessagesByPtr = make(map[*messageInfo]int)
+ f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields)
+ for i, m := range f.allMessages {
+ f.allMessagesByPtr[m] = i
+ f.allMessageFieldsByPtr[m] = new(structFields)
+ }
+ }
+
+ return f
+}
+
+type enumInfo struct {
+ *protogen.Enum
+
+ genJSONMethod bool
+ genRawDescMethod bool
+}
+
+func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo {
+ e := &enumInfo{Enum: enum}
+ e.genJSONMethod = true
+ e.genRawDescMethod = true
+ return e
+}
+
+type messageInfo struct {
+ *protogen.Message
+
+ genRawDescMethod bool
+ genExtRangeMethod bool
+
+ isTracked bool
+ hasWeak bool
+}
+
+func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo {
+ m := &messageInfo{Message: message}
+ m.genRawDescMethod = true
+ m.genExtRangeMethod = true
+ m.isTracked = isTrackedMessage(m)
+ for _, field := range m.Fields {
+ m.hasWeak = m.hasWeak || field.Desc.IsWeak()
+ }
+ return m
+}
+
+// isTrackedMessage reports whether field tracking is enabled on the message.
+func isTrackedMessage(m *messageInfo) (tracked bool) {
+ const trackFieldUse_fieldNumber = 37383685
+
+ // Decode the option from unknown fields to avoid a dependency on the
+ // annotation proto from protoc-gen-go.
+ b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown()
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ if num == trackFieldUse_fieldNumber && typ == protowire.VarintType {
+ v, _ := protowire.ConsumeVarint(b)
+ tracked = protowire.DecodeBool(v)
+ }
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ return tracked
+}
+
+type extensionInfo struct {
+ *protogen.Extension
+}
+
+func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo {
+ x := &extensionInfo{Extension: extension}
+ return x
+}
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go
new file mode 100644
index 00000000..f8b76bf5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go
@@ -0,0 +1,896 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal_gengo is internal to the protobuf module.
+package internal_gengo
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "math"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/compiler/protogen"
+ "google.golang.org/protobuf/internal/encoding/tag"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/version"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+ "google.golang.org/protobuf/types/pluginpb"
+)
+
+// SupportedFeatures reports the set of supported protobuf language features.
+var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)
+
+// GenerateVersionMarkers specifies whether to generate version markers.
+var GenerateVersionMarkers = true
+
+// Standard library dependencies.
+const (
+ base64Package = protogen.GoImportPath("encoding/base64")
+ mathPackage = protogen.GoImportPath("math")
+ reflectPackage = protogen.GoImportPath("reflect")
+ sortPackage = protogen.GoImportPath("sort")
+ stringsPackage = protogen.GoImportPath("strings")
+ syncPackage = protogen.GoImportPath("sync")
+ timePackage = protogen.GoImportPath("time")
+ utf8Package = protogen.GoImportPath("unicode/utf8")
+)
+
+// Protobuf library dependencies.
+//
+// These are declared as an interface type so that they can be more easily
+// patched to support unique build environments that impose restrictions
+// on the dependencies of generated source code.
+var (
+ protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto")
+ protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface")
+ protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl")
+ protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson")
+ protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect")
+ protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry")
+)
+
+type goImportPath interface {
+ String() string
+ Ident(string) protogen.GoIdent
+}
+
+// GenerateFile generates the contents of a .pb.go file.
+func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile {
+ filename := file.GeneratedFilenamePrefix + ".pb.go"
+ g := gen.NewGeneratedFile(filename, file.GoImportPath)
+ f := newFileInfo(file)
+
+ genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number))
+ genGeneratedHeader(gen, g, f)
+ genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number))
+
+ packageDoc := genPackageKnownComment(f)
+ g.P(packageDoc, "package ", f.GoPackageName)
+ g.P()
+
+ // Emit a static check that enforces a minimum version of the proto package.
+ if GenerateVersionMarkers {
+ g.P("const (")
+ g.P("// Verify that this generated code is sufficiently up-to-date.")
+ g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")")
+ g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.")
+ g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")")
+ g.P(")")
+ g.P()
+ }
+
+ for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
+ genImport(gen, g, f, imps.Get(i))
+ }
+ for _, enum := range f.allEnums {
+ genEnum(g, f, enum)
+ }
+ for _, message := range f.allMessages {
+ genMessage(g, f, message)
+ }
+ genExtensions(g, f)
+
+ genReflectFileDescriptor(gen, g, f)
+
+ return g
+}
+
+// genStandaloneComments prints all leading comments for a FileDescriptorProto
+// location identified by the field number n.
+func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) {
+ loc := f.Desc.SourceLocations().ByPath(protoreflect.SourcePath{n})
+ for _, s := range loc.LeadingDetachedComments {
+ g.P(protogen.Comments(s))
+ g.P()
+ }
+ if s := loc.LeadingComments; s != "" {
+ g.P(protogen.Comments(s))
+ g.P()
+ }
+}
+
+func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
+ g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+
+ if GenerateVersionMarkers {
+ g.P("// versions:")
+ protocGenGoVersion := version.String()
+ protocVersion := "(unknown)"
+ if v := gen.Request.GetCompilerVersion(); v != nil {
+ protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch())
+ if s := v.GetSuffix(); s != "" {
+ protocVersion += "-" + s
+ }
+ }
+ g.P("// \tprotoc-gen-go ", protocGenGoVersion)
+ g.P("// \tprotoc ", protocVersion)
+ }
+
+ if f.Proto.GetOptions().GetDeprecated() {
+ g.P("// ", f.Desc.Path(), " is a deprecated file.")
+ } else {
+ g.P("// source: ", f.Desc.Path())
+ }
+ g.P()
+}
+
+func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) {
+ impFile, ok := gen.FilesByPath[imp.Path()]
+ if !ok {
+ return
+ }
+ if impFile.GoImportPath == f.GoImportPath {
+ // Don't generate imports or aliases for types in the same Go package.
+ return
+ }
+ // Generate imports for all non-weak dependencies, even if they are not
+ // referenced, because other code and tools depend on having the
+ // full transitive closure of protocol buffer types in the binary.
+ if !imp.IsWeak {
+ g.Import(impFile.GoImportPath)
+ }
+ if !imp.IsPublic {
+ return
+ }
+
+ // Generate public imports by generating the imported file, parsing it,
+ // and extracting every symbol that should receive a forwarding declaration.
+ impGen := GenerateFile(gen, impFile)
+ impGen.Skip()
+ b, err := impGen.Content()
+ if err != nil {
+ gen.Error(err)
+ return
+ }
+ fset := token.NewFileSet()
+ astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments)
+ if err != nil {
+ gen.Error(err)
+ return
+ }
+ genForward := func(tok token.Token, name string, expr ast.Expr) {
+ // Don't import unexported symbols.
+ r, _ := utf8.DecodeRuneInString(name)
+ if !unicode.IsUpper(r) {
+ return
+ }
+ // Don't import the FileDescriptor.
+ if name == impFile.GoDescriptorIdent.GoName {
+ return
+ }
+ // Don't import decls referencing a symbol defined in another package.
+ // i.e., don't import decls which are themselves public imports:
+ //
+ // type T = somepackage.T
+ if _, ok := expr.(*ast.SelectorExpr); ok {
+ return
+ }
+ g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name))
+ }
+ g.P("// Symbols defined in public import of ", imp.Path(), ".")
+ g.P()
+ for _, decl := range astFile.Decls {
+ switch decl := decl.(type) {
+ case *ast.GenDecl:
+ for _, spec := range decl.Specs {
+ switch spec := spec.(type) {
+ case *ast.TypeSpec:
+ genForward(decl.Tok, spec.Name.Name, spec.Type)
+ case *ast.ValueSpec:
+ for i, name := range spec.Names {
+ var expr ast.Expr
+ if i < len(spec.Values) {
+ expr = spec.Values[i]
+ }
+ genForward(decl.Tok, name.Name, expr)
+ }
+ case *ast.ImportSpec:
+ default:
+ panic(fmt.Sprintf("can't generate forward for spec type %T", spec))
+ }
+ }
+ }
+ }
+ g.P()
+}
+
+func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) {
+ // Enum type declaration.
+ g.Annotate(e.GoIdent.GoName, e.Location)
+ leadingComments := appendDeprecationSuffix(e.Comments.Leading,
+ e.Desc.ParentFile(),
+ e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated())
+ g.P(leadingComments,
+ "type ", e.GoIdent, " int32")
+
+ // Enum value constants.
+ g.P("const (")
+ for _, value := range e.Values {
+ g.Annotate(value.GoIdent.GoName, value.Location)
+ leadingComments := appendDeprecationSuffix(value.Comments.Leading,
+ value.Desc.ParentFile(),
+ value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated())
+ g.P(leadingComments,
+ value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(),
+ trailingComment(value.Comments.Trailing))
+ }
+ g.P(")")
+ g.P()
+
+ // Enum value maps.
+ g.P("// Enum value maps for ", e.GoIdent, ".")
+ g.P("var (")
+ g.P(e.GoIdent.GoName+"_name", " = map[int32]string{")
+ for _, value := range e.Values {
+ duplicate := ""
+ if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) {
+ duplicate = "// Duplicate value: "
+ }
+ g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",")
+ }
+ g.P("}")
+ g.P(e.GoIdent.GoName+"_value", " = map[string]int32{")
+ for _, value := range e.Values {
+ g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",")
+ }
+ g.P("}")
+ g.P(")")
+ g.P()
+
+ // Enum method.
+ //
+ // NOTE: A pointer value is needed to represent presence in proto2.
+ // Since a proto2 message can reference a proto3 enum, it is useful to
+ // always generate this method (even on proto3 enums) to support that case.
+ g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {")
+ g.P("p := new(", e.GoIdent, ")")
+ g.P("*p = x")
+ g.P("return p")
+ g.P("}")
+ g.P()
+
+ // String method.
+ g.P("func (x ", e.GoIdent, ") String() string {")
+ g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))")
+ g.P("}")
+ g.P()
+
+ genEnumReflectMethods(g, f, e)
+
+ // UnmarshalJSON method.
+ if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 {
+ g.P("// Deprecated: Do not use.")
+ g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {")
+ g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)")
+ g.P("if err != nil {")
+ g.P("return err")
+ g.P("}")
+ g.P("*x = ", e.GoIdent, "(num)")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ }
+
+ // EnumDescriptor method.
+ if e.genRawDescMethod {
+ var indexes []string
+ for i := 1; i < len(e.Location.Path); i += 2 {
+ indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i])))
+ }
+ g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.")
+ g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {")
+ g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}")
+ g.P("}")
+ g.P()
+ f.needRawDesc = true
+ }
+}
+
+func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ if m.Desc.IsMapEntry() {
+ return
+ }
+
+ // Message type declaration.
+ g.Annotate(m.GoIdent.GoName, m.Location)
+ leadingComments := appendDeprecationSuffix(m.Comments.Leading,
+ m.Desc.ParentFile(),
+ m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated())
+ g.P(leadingComments,
+ "type ", m.GoIdent, " struct {")
+ genMessageFields(g, f, m)
+ g.P("}")
+ g.P()
+
+ genMessageKnownFunctions(g, f, m)
+ genMessageDefaultDecls(g, f, m)
+ genMessageMethods(g, f, m)
+ genMessageOneofWrapperTypes(g, f, m)
+}
+
+func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ sf := f.allMessageFieldsByPtr[m]
+ genMessageInternalFields(g, f, m, sf)
+ for _, field := range m.Fields {
+ genMessageField(g, f, m, field, sf)
+ }
+}
+
+func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) {
+ g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState"))
+ sf.append(genid.State_goname)
+ g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache"))
+ sf.append(genid.SizeCache_goname)
+ if m.hasWeak {
+ g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields"))
+ sf.append(genid.WeakFields_goname)
+ }
+ g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields"))
+ sf.append(genid.UnknownFields_goname)
+ if m.Desc.ExtensionRanges().Len() > 0 {
+ g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields"))
+ sf.append(genid.ExtensionFields_goname)
+ }
+ if sf.count > 0 {
+ g.P()
+ }
+}
+
+func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) {
+ if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() {
+ // It would be a bit simpler to iterate over the oneofs below,
+ // but generating the field here keeps the contents of the Go
+ // struct in the same order as the contents of the source
+ // .proto file.
+ if oneof.Fields[0] != field {
+ return // only generate for first appearance
+ }
+
+ tags := structTags{
+ {"protobuf_oneof", string(oneof.Desc.Name())},
+ }
+ if m.isTracked {
+ tags = append(tags, gotrackTags...)
+ }
+
+ g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location)
+ leadingComments := oneof.Comments.Leading
+ if leadingComments != "" {
+ leadingComments += "\n"
+ }
+ ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)}
+ for _, field := range oneof.Fields {
+ ss = append(ss, "\t*"+field.GoIdent.GoName+"\n")
+ }
+ leadingComments += protogen.Comments(strings.Join(ss, ""))
+ g.P(leadingComments,
+ oneof.GoName, " ", oneofInterfaceName(oneof), tags)
+ sf.append(oneof.GoName)
+ return
+ }
+ goType, pointer := fieldGoType(g, f, field)
+ if pointer {
+ goType = "*" + goType
+ }
+ tags := structTags{
+ {"protobuf", fieldProtobufTagValue(field)},
+ {"json", fieldJSONTagValue(field)},
+ }
+ if field.Desc.IsMap() {
+ key := field.Message.Fields[0]
+ val := field.Message.Fields[1]
+ tags = append(tags, structTags{
+ {"protobuf_key", fieldProtobufTagValue(key)},
+ {"protobuf_val", fieldProtobufTagValue(val)},
+ }...)
+ }
+ if m.isTracked {
+ tags = append(tags, gotrackTags...)
+ }
+
+ name := field.GoName
+ if field.Desc.IsWeak() {
+ name = genid.WeakFieldPrefix_goname + name
+ }
+ g.Annotate(m.GoIdent.GoName+"."+name, field.Location)
+ leadingComments := appendDeprecationSuffix(field.Comments.Leading,
+ field.Desc.ParentFile(),
+ field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
+ g.P(leadingComments,
+ name, " ", goType, tags,
+ trailingComment(field.Comments.Trailing))
+ sf.append(field.GoName)
+}
+
+// genMessageDefaultDecls generates consts and vars holding the default
+// values of fields.
+func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ var consts, vars []string
+ for _, field := range m.Fields {
+ if !field.Desc.HasDefault() {
+ continue
+ }
+ name := "Default_" + m.GoIdent.GoName + "_" + field.GoName
+ goType, _ := fieldGoType(g, f, field)
+ defVal := field.Desc.Default()
+ switch field.Desc.Kind() {
+ case protoreflect.StringKind:
+ consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String()))
+ case protoreflect.BytesKind:
+ vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes()))
+ case protoreflect.EnumKind:
+ idx := field.Desc.DefaultEnumValue().Index()
+ val := field.Enum.Values[idx]
+ if val.GoIdent.GoImportPath == f.GoImportPath {
+ consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent)))
+ } else {
+ // If the enum value is declared in a different Go package,
+ // reference it by number since the name may not be correct.
+ // See https://github.com/golang/protobuf/issues/513.
+ consts = append(consts, fmt.Sprintf("%s = %s(%d) // %s",
+ name, g.QualifiedGoIdent(field.Enum.GoIdent), val.Desc.Number(), g.QualifiedGoIdent(val.GoIdent)))
+ }
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) {
+ var fn, arg string
+ switch f := defVal.Float(); {
+ case math.IsInf(f, -1):
+ fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1"
+ case math.IsInf(f, +1):
+ fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1"
+ case math.IsNaN(f):
+ fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), ""
+ }
+ vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg))
+ } else {
+ consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f))
+ }
+ default:
+ consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface()))
+ }
+ }
+ if len(consts) > 0 {
+ g.P("// Default values for ", m.GoIdent, " fields.")
+ g.P("const (")
+ for _, s := range consts {
+ g.P(s)
+ }
+ g.P(")")
+ }
+ if len(vars) > 0 {
+ g.P("// Default values for ", m.GoIdent, " fields.")
+ g.P("var (")
+ for _, s := range vars {
+ g.P(s)
+ }
+ g.P(")")
+ }
+ g.P()
+}
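+
+// Editorial note (illustrative sketch, not part of the upstream source): for a
+// hypothetical proto2 field `optional int32 x = 1 [default = 5];` in a message
+// M, the loop above would emit roughly:
+//
+//     // Default values for M fields.
+//     const (
+//         Default_M_X = int32(5)
+//     )
+//
+// Bytes defaults are emitted in a var block instead, since []byte values
+// cannot be Go constants.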
+
+func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ genMessageBaseMethods(g, f, m)
+ genMessageGetterMethods(g, f, m)
+ genMessageSetterMethods(g, f, m)
+}
+
+func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ // Reset method.
+ g.P("func (x *", m.GoIdent, ") Reset() {")
+ g.P("*x = ", m.GoIdent, "{}")
+ g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {")
+ g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]")
+ g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))")
+ g.P("ms.StoreMessageInfo(mi)")
+ g.P("}")
+ g.P("}")
+ g.P()
+
+ // String method.
+ g.P("func (x *", m.GoIdent, ") String() string {")
+ g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)")
+ g.P("}")
+ g.P()
+
+ // ProtoMessage method.
+ g.P("func (*", m.GoIdent, ") ProtoMessage() {}")
+ g.P()
+
+ // ProtoReflect method.
+ genMessageReflectMethods(g, f, m)
+
+ // Descriptor method.
+ if m.genRawDescMethod {
+ var indexes []string
+ for i := 1; i < len(m.Location.Path); i += 2 {
+ indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i])))
+ }
+ g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.")
+ g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {")
+ g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}")
+ g.P("}")
+ g.P()
+ f.needRawDesc = true
+ }
+}
+
+func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ for _, field := range m.Fields {
+ genNoInterfacePragma(g, m.isTracked)
+
+ // Getter for parent oneof.
+ if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() {
+ g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location)
+ g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {")
+ g.P("if m != nil {")
+ g.P("return m.", oneof.GoName)
+ g.P("}")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ }
+
+ // Getter for message field.
+ goType, pointer := fieldGoType(g, f, field)
+ defaultValue := fieldDefaultValue(g, f, m, field)
+ g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location)
+ leadingComments := appendDeprecationSuffix("",
+ field.Desc.ParentFile(),
+ field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
+ switch {
+ case field.Desc.IsWeak():
+ g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{")
+ g.P("var w ", protoimplPackage.Ident("WeakFields"))
+ g.P("if x != nil {")
+ g.P("w = x.", genid.WeakFields_goname)
+ if m.isTracked {
+ g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName)
+ }
+ g.P("}")
+ g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")")
+ g.P("}")
+ case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic():
+ g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {")
+ g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {")
+ g.P("return x.", field.GoName)
+ g.P("}")
+ g.P("return ", defaultValue)
+ g.P("}")
+ default:
+ g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {")
+ if !field.Desc.HasPresence() || defaultValue == "nil" {
+ g.P("if x != nil {")
+ } else {
+ g.P("if x != nil && x.", field.GoName, " != nil {")
+ }
+ star := ""
+ if pointer {
+ star = "*"
+ }
+ g.P("return ", star, " x.", field.GoName)
+ g.P("}")
+ g.P("return ", defaultValue)
+ g.P("}")
+ }
+ g.P()
+ }
+}
+
+func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ for _, field := range m.Fields {
+ if !field.Desc.IsWeak() {
+ continue
+ }
+
+ genNoInterfacePragma(g, m.isTracked)
+
+ g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location)
+ leadingComments := appendDeprecationSuffix("",
+ field.Desc.ParentFile(),
+ field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
+ g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {")
+ g.P("var w *", protoimplPackage.Ident("WeakFields"))
+ g.P("if x != nil {")
+ g.P("w = &x.", genid.WeakFields_goname)
+ if m.isTracked {
+ g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName)
+ }
+ g.P("}")
+ g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)")
+ g.P("}")
+ g.P()
+ }
+}
+
+// fieldGoType returns the Go type used for a field.
+//
+// If it returns pointer=true, the struct field is a pointer to the type.
+func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) {
+ if field.Desc.IsWeak() {
+ return "struct{}", false
+ }
+
+ pointer = field.Desc.HasPresence()
+ switch field.Desc.Kind() {
+ case protoreflect.BoolKind:
+ goType = "bool"
+ case protoreflect.EnumKind:
+ goType = g.QualifiedGoIdent(field.Enum.GoIdent)
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ goType = "int32"
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ goType = "uint32"
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ goType = "int64"
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ goType = "uint64"
+ case protoreflect.FloatKind:
+ goType = "float32"
+ case protoreflect.DoubleKind:
+ goType = "float64"
+ case protoreflect.StringKind:
+ goType = "string"
+ case protoreflect.BytesKind:
+ goType = "[]byte"
+ pointer = false // rely on nullability of slices for presence
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent)
+ pointer = false // pointer captured as part of the type
+ }
+ switch {
+ case field.Desc.IsList():
+ return "[]" + goType, false
+ case field.Desc.IsMap():
+ keyType, _ := fieldGoType(g, f, field.Message.Fields[0])
+ valType, _ := fieldGoType(g, f, field.Message.Fields[1])
+ return fmt.Sprintf("map[%v]%v", keyType, valType), false
+ }
+ return goType, pointer
+}
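+
+// Editorial note (illustrative, not part of the upstream source): under the
+// mapping above, an explicit-presence scalar such as a proto2 `optional int32`
+// yields (goType, pointer) = ("int32", true) and is emitted as a *int32 struct
+// field, a message field yields "*M" with pointer=false, `repeated string`
+// yields "[]string", and `map<string, int64>` yields "map[string]int64".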
+
+func fieldProtobufTagValue(field *protogen.Field) string {
+ var enumName string
+ if field.Desc.Kind() == protoreflect.EnumKind {
+ enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc)
+ }
+ return tag.Marshal(field.Desc, enumName)
+}
+
+func fieldDefaultValue(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field) string {
+ if field.Desc.IsList() {
+ return "nil"
+ }
+ if field.Desc.HasDefault() {
+ defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName
+ if field.Desc.Kind() == protoreflect.BytesKind {
+ return "append([]byte(nil), " + defVarName + "...)"
+ }
+ return defVarName
+ }
+ switch field.Desc.Kind() {
+ case protoreflect.BoolKind:
+ return "false"
+ case protoreflect.StringKind:
+ return `""`
+ case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind:
+ return "nil"
+ case protoreflect.EnumKind:
+ val := field.Enum.Values[0]
+ if val.GoIdent.GoImportPath == f.GoImportPath {
+ return g.QualifiedGoIdent(val.GoIdent)
+ } else {
+ // If the enum value is declared in a different Go package,
+ // reference it by number since the name may not be correct.
+ // See https://github.com/golang/protobuf/issues/513.
+ return g.QualifiedGoIdent(field.Enum.GoIdent) + "(" + strconv.FormatInt(int64(val.Desc.Number()), 10) + ")"
+ }
+ default:
+ return "0"
+ }
+}
+
+func fieldJSONTagValue(field *protogen.Field) string {
+ return string(field.Desc.Name()) + ",omitempty"
+}
+
+func genExtensions(g *protogen.GeneratedFile, f *fileInfo) {
+ if len(f.allExtensions) == 0 {
+ return
+ }
+
+ g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{")
+ for _, x := range f.allExtensions {
+ g.P("{")
+ g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),")
+ goType, pointer := fieldGoType(g, f, x.Extension)
+ if pointer {
+ goType = "*" + goType
+ }
+ g.P("ExtensionType: (", goType, ")(nil),")
+ g.P("Field: ", x.Desc.Number(), ",")
+ g.P("Name: ", strconv.Quote(string(x.Desc.FullName())), ",")
+ g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",")
+ g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",")
+ g.P("},")
+ }
+ g.P("}")
+ g.P()
+
+ // Group extensions by the target message.
+ var orderedTargets []protogen.GoIdent
+ allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo)
+ allExtensionsByPtr := make(map[*extensionInfo]int)
+ for i, x := range f.allExtensions {
+ target := x.Extendee.GoIdent
+ if len(allExtensionsByTarget[target]) == 0 {
+ orderedTargets = append(orderedTargets, target)
+ }
+ allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x)
+ allExtensionsByPtr[x] = i
+ }
+ for _, target := range orderedTargets {
+ g.P("// Extension fields to ", target, ".")
+ g.P("var (")
+ for _, x := range allExtensionsByTarget[target] {
+ xd := x.Desc
+ typeName := xd.Kind().String()
+ switch xd.Kind() {
+ case protoreflect.EnumKind:
+ typeName = string(xd.Enum().FullName())
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ typeName = string(xd.Message().FullName())
+ }
+ fieldName := string(xd.Name())
+
+ leadingComments := x.Comments.Leading
+ if leadingComments != "" {
+ leadingComments += "\n"
+ }
+ leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n",
+ xd.Cardinality(), typeName, fieldName, xd.Number()))
+ leadingComments = appendDeprecationSuffix(leadingComments,
+ x.Desc.ParentFile(),
+ x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
+ g.P(leadingComments,
+ "E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]",
+ trailingComment(x.Comments.Trailing))
+ }
+ g.P(")")
+ g.P()
+ }
+}
+
+// genMessageOneofWrapperTypes generates the oneof wrapper types and
+// associates the types with the parent message type.
+func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ for _, oneof := range m.Oneofs {
+ if oneof.Desc.IsSynthetic() {
+ continue
+ }
+ ifName := oneofInterfaceName(oneof)
+ g.P("type ", ifName, " interface {")
+ g.P(ifName, "()")
+ g.P("}")
+ g.P()
+ for _, field := range oneof.Fields {
+ g.Annotate(field.GoIdent.GoName, field.Location)
+ g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location)
+ g.P("type ", field.GoIdent, " struct {")
+ goType, _ := fieldGoType(g, f, field)
+ tags := structTags{
+ {"protobuf", fieldProtobufTagValue(field)},
+ }
+ if m.isTracked {
+ tags = append(tags, gotrackTags...)
+ }
+ leadingComments := appendDeprecationSuffix(field.Comments.Leading,
+ field.Desc.ParentFile(),
+ field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated())
+ g.P(leadingComments,
+ field.GoName, " ", goType, tags,
+ trailingComment(field.Comments.Trailing))
+ g.P("}")
+ g.P()
+ }
+ for _, field := range oneof.Fields {
+ g.P("func (*", field.GoIdent, ") ", ifName, "() {}")
+ g.P()
+ }
+ }
+}
+
+// oneofInterfaceName returns the name of the interface type implemented by
+// the oneof field value types.
+func oneofInterfaceName(oneof *protogen.Oneof) string {
+ return "is" + oneof.GoIdent.GoName
+}
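+
+// Editorial note (illustrative, not part of the upstream source): for a oneof
+// whose GoIdent.GoName is, say, "M_Kind", the interface generated by
+// genMessageOneofWrapperTypes is named "isM_Kind", and each wrapper struct
+// receives an empty "func (*Wrapper) isM_Kind() {}" method so that it
+// satisfies the interface.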
+
+// genNoInterfacePragma generates a standalone "nointerface" pragma to
+// decorate methods with field-tracking support.
+func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) {
+ if tracked {
+ g.P("//go:nointerface")
+ g.P()
+ }
+}
+
+var gotrackTags = structTags{{"go", "track"}}
+
+// structTags is a data structure for building idiomatic Go struct tags.
+// Each [2]string is a key-value pair, where the value is the unescaped string.
+//
+// Example: structTags{{"key", "value"}}.String() -> `key:"value"`
+type structTags [][2]string
+
+func (tags structTags) String() string {
+ if len(tags) == 0 {
+ return ""
+ }
+ var ss []string
+ for _, tag := range tags {
+ // NOTE: When quoting the value, we need to make sure the backtick
+ // character does not appear. Convert all cases to the escaped hex form.
+ key := tag[0]
+ val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1)
+ ss = append(ss, fmt.Sprintf("%s:%s", key, val))
+ }
+ return "`" + strings.Join(ss, " ") + "`"
+}
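+
+// Editorial note (illustrative, not part of the upstream source): a two-entry
+// tag set such as
+//
+//     structTags{
+//         {"protobuf", "bytes,1,opt,name=name"},
+//         {"json", "name,omitempty"},
+//     }.String()
+//
+// renders as `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`.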
+
+// appendDeprecationSuffix optionally appends a deprecation notice as a suffix.
+func appendDeprecationSuffix(prefix protogen.Comments, parentFile protoreflect.FileDescriptor, deprecated bool) protogen.Comments {
+ fileDeprecated := parentFile.Options().(*descriptorpb.FileOptions).GetDeprecated()
+ if !deprecated && !fileDeprecated {
+ return prefix
+ }
+ if prefix != "" {
+ prefix += "\n"
+ }
+ if fileDeprecated {
+ return prefix + " Deprecated: The entire proto file " + protogen.Comments(parentFile.Path()) + " is marked as deprecated.\n"
+ }
+ return prefix + " Deprecated: Marked as deprecated in " + protogen.Comments(parentFile.Path()) + ".\n"
+}
+
+// trailingComment is like protogen.Comments, but lacks a trailing newline.
+type trailingComment protogen.Comments
+
+func (c trailingComment) String() string {
+ s := strings.TrimSuffix(protogen.Comments(c).String(), "\n")
+ if strings.Contains(s, "\n") {
+ // We don't support multi-lined trailing comments as it is unclear
+ // how to best render them in the generated code.
+ return ""
+ }
+ return s
+}
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go
new file mode 100644
index 00000000..0048beb1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go
@@ -0,0 +1,372 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal_gengo
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/compiler/protogen"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protopath"
+ "google.golang.org/protobuf/reflect/protorange"
+ "google.golang.org/protobuf/reflect/protoreflect"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
+ g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor"))
+ g.P()
+
+ genFileDescriptor(gen, g, f)
+ if len(f.allEnums) > 0 {
+ g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")")
+ }
+ if len(f.allMessages) > 0 {
+ g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")")
+ }
+
+ // Generate a unique list of Go types for all declarations and dependencies,
+ // and the associated index into the type list for all dependencies.
+ var goTypes []string
+ var depIdxs []string
+ seen := map[protoreflect.FullName]int{}
+ genDep := func(name protoreflect.FullName, depSource string) {
+ if depSource != "" {
+ line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name)
+ depIdxs = append(depIdxs, line)
+ }
+ }
+ genEnum := func(e *protogen.Enum, depSource string) {
+ if e != nil {
+ name := e.Desc.FullName()
+ if _, ok := seen[name]; !ok {
+ line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name)
+ goTypes = append(goTypes, line)
+ seen[name] = len(seen)
+ }
+ if depSource != "" {
+ genDep(name, depSource)
+ }
+ }
+ }
+ genMessage := func(m *protogen.Message, depSource string) {
+ if m != nil {
+ name := m.Desc.FullName()
+ if _, ok := seen[name]; !ok {
+ line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name)
+ if m.Desc.IsMapEntry() {
+ // Map entry messages have no associated Go type.
+ line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name)
+ }
+ goTypes = append(goTypes, line)
+ seen[name] = len(seen)
+ }
+ if depSource != "" {
+ genDep(name, depSource)
+ }
+ }
+ }
+
+ // This ordering is significant.
+ // See filetype.TypeBuilder.DependencyIndexes.
+ type offsetEntry struct {
+ start int
+ name string
+ }
+ var depOffsets []offsetEntry
+ for _, enum := range f.allEnums {
+ genEnum(enum.Enum, "")
+ }
+ for _, message := range f.allMessages {
+ genMessage(message.Message, "")
+ }
+ depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"})
+ for _, message := range f.allMessages {
+ for _, field := range message.Fields {
+ if field.Desc.IsWeak() {
+ continue
+ }
+ source := string(field.Desc.FullName())
+ genEnum(field.Enum, source+":type_name")
+ genMessage(field.Message, source+":type_name")
+ }
+ }
+ depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"})
+ for _, extension := range f.allExtensions {
+ source := string(extension.Desc.FullName())
+ genMessage(extension.Extendee, source+":extendee")
+ }
+ depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"})
+ for _, extension := range f.allExtensions {
+ source := string(extension.Desc.FullName())
+ genEnum(extension.Enum, source+":type_name")
+ genMessage(extension.Message, source+":type_name")
+ }
+ depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"})
+ for _, service := range f.Services {
+ for _, method := range service.Methods {
+ source := string(method.Desc.FullName())
+ genMessage(method.Input, source+":input_type")
+ }
+ }
+ depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"})
+ for _, service := range f.Services {
+ for _, method := range service.Methods {
+ source := string(method.Desc.FullName())
+ genMessage(method.Output, source+":output_type")
+ }
+ }
+ depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""})
+ for i := len(depOffsets) - 2; i >= 0; i-- {
+ curr, next := depOffsets[i], depOffsets[i+1]
+ depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s",
+ curr.start, curr.start, next.start, curr.name))
+ }
+ if len(depIdxs) > math.MaxInt32 {
+ panic("too many dependencies") // sanity check
+ }
+
+ g.P("var ", goTypesVarName(f), " = []interface{}{")
+ for _, s := range goTypes {
+ g.P(s)
+ }
+ g.P("}")
+
+ g.P("var ", depIdxsVarName(f), " = []int32{")
+ for _, s := range depIdxs {
+ g.P(s)
+ }
+ g.P("}")
+
+ g.P("func init() { ", initFuncName(f.File), "() }")
+
+ g.P("func ", initFuncName(f.File), "() {")
+ g.P("if ", f.GoDescriptorIdent, " != nil {")
+ g.P("return")
+ g.P("}")
+
+ // Ensure that initialization functions for different files in the same Go
+ // package run in the correct order: Call the init funcs for every .proto file
+ // imported by this one that is in the same Go package.
+ for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ {
+ impFile := gen.FilesByPath[imps.Get(i).Path()]
+ if impFile.GoImportPath != f.GoImportPath {
+ continue
+ }
+ g.P(initFuncName(impFile), "()")
+ }
+
+ if len(f.allMessages) > 0 {
+ // Populate MessageInfo.Exporters.
+ g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {")
+ for _, message := range f.allMessages {
+ if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 {
+ idx := f.allMessagesByPtr[message]
+ typesVar := messageTypesVarName(f)
+
+ g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {")
+ g.P("switch v := v.(*", message.GoIdent, "); i {")
+ for i := 0; i < sf.count; i++ {
+ if name := sf.unexported[i]; name != "" {
+ g.P("case ", i, ": return &v.", name)
+ }
+ }
+ g.P("default: return nil")
+ g.P("}")
+ g.P("}")
+ }
+ }
+ g.P("}")
+
+ // Populate MessageInfo.OneofWrappers.
+ for _, message := range f.allMessages {
+ if len(message.Oneofs) > 0 {
+ idx := f.allMessagesByPtr[message]
+ typesVar := messageTypesVarName(f)
+
+ // Associate the wrapper types by directly passing them to the MessageInfo.
+ g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {")
+ for _, oneof := range message.Oneofs {
+ if !oneof.Desc.IsSynthetic() {
+ for _, field := range oneof.Fields {
+ g.P("(*", field.GoIdent, ")(nil),")
+ }
+ }
+ }
+ g.P("}")
+ }
+ }
+ }
+
+ g.P("type x struct{}")
+ g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{")
+ g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{")
+ g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),")
+ g.P("RawDescriptor: ", rawDescVarName(f), ",")
+ g.P("NumEnums: ", len(f.allEnums), ",")
+ g.P("NumMessages: ", len(f.allMessages), ",")
+ g.P("NumExtensions: ", len(f.allExtensions), ",")
+ g.P("NumServices: ", len(f.Services), ",")
+ g.P("},")
+ g.P("GoTypes: ", goTypesVarName(f), ",")
+ g.P("DependencyIndexes: ", depIdxsVarName(f), ",")
+ if len(f.allEnums) > 0 {
+ g.P("EnumInfos: ", enumTypesVarName(f), ",")
+ }
+ if len(f.allMessages) > 0 {
+ g.P("MessageInfos: ", messageTypesVarName(f), ",")
+ }
+ if len(f.allExtensions) > 0 {
+ g.P("ExtensionInfos: ", extensionTypesVarName(f), ",")
+ }
+ g.P("}.Build()")
+ g.P(f.GoDescriptorIdent, " = out.File")
+
+ // Set inputs to nil to allow GC to reclaim resources.
+ g.P(rawDescVarName(f), " = nil")
+ g.P(goTypesVarName(f), " = nil")
+ g.P(depIdxsVarName(f), " = nil")
+ g.P("}")
+}
+
+// stripSourceRetentionFieldsFromMessage walks the given message tree recursively
+// and clears any fields with the field option: [retention = RETENTION_SOURCE]
+func stripSourceRetentionFieldsFromMessage(m protoreflect.Message) {
+ protorange.Range(m, func(ppv protopath.Values) error {
+ m2, ok := ppv.Index(-1).Value.Interface().(protoreflect.Message)
+ if !ok {
+ return nil
+ }
+ m2.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ fdo, ok := fd.Options().(*descriptorpb.FieldOptions)
+ if ok && fdo.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE {
+ m2.Clear(fd)
+ }
+ return true
+ })
+ return nil
+ })
+}
+
+func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) {
+ descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto)
+ descProto.SourceCodeInfo = nil // drop source code information
+ stripSourceRetentionFieldsFromMessage(descProto.ProtoReflect())
+ b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto)
+ if err != nil {
+ gen.Error(err)
+ return
+ }
+
+ g.P("var ", rawDescVarName(f), " = []byte{")
+ for len(b) > 0 {
+ n := 16
+ if n > len(b) {
+ n = len(b)
+ }
+
+ s := ""
+ for _, c := range b[:n] {
+ s += fmt.Sprintf("0x%02x,", c)
+ }
+ g.P(s)
+
+ b = b[n:]
+ }
+ g.P("}")
+ g.P()
+
+ if f.needRawDesc {
+ onceVar := rawDescVarName(f) + "Once"
+ dataVar := rawDescVarName(f) + "Data"
+ g.P("var (")
+ g.P(onceVar, " ", syncPackage.Ident("Once"))
+ g.P(dataVar, " = ", rawDescVarName(f))
+ g.P(")")
+ g.P()
+
+ g.P("func ", rawDescVarName(f), "GZIP() []byte {")
+ g.P(onceVar, ".Do(func() {")
+ g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")")
+ g.P("})")
+ g.P("return ", dataVar)
+ g.P("}")
+ g.P()
+ }
+}
+
+func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) {
+ idx := f.allEnumsByPtr[e]
+ typesVar := enumTypesVarName(f)
+
+ // Descriptor method.
+ g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {")
+ g.P("return ", typesVar, "[", idx, "].Descriptor()")
+ g.P("}")
+ g.P()
+
+ // Type method.
+ g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {")
+ g.P("return &", typesVar, "[", idx, "]")
+ g.P("}")
+ g.P()
+
+ // Number method.
+ g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {")
+ g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)")
+ g.P("}")
+ g.P()
+}
+
+func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ idx := f.allMessagesByPtr[m]
+ typesVar := messageTypesVarName(f)
+
+ // ProtoReflect method.
+ g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {")
+ g.P("mi := &", typesVar, "[", idx, "]")
+ g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {")
+ g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))")
+ g.P("if ms.LoadMessageInfo() == nil {")
+ g.P("ms.StoreMessageInfo(mi)")
+ g.P("}")
+ g.P("return ms")
+ g.P("}")
+ g.P("return mi.MessageOf(x)")
+ g.P("}")
+ g.P()
+}
+
+func fileVarName(f *protogen.File, suffix string) string {
+ prefix := f.GoDescriptorIdent.GoName
+ _, n := utf8.DecodeRuneInString(prefix)
+ prefix = strings.ToLower(prefix[:n]) + prefix[n:]
+ return prefix + "_" + suffix
+}
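+
+// Editorial note (illustrative, not part of the upstream source): fileVarName
+// lowercases only the first rune of the Go descriptor identifier, so a file
+// whose descriptor var is File_example_proto yields names such as
+// file_example_proto_rawDesc, file_example_proto_goTypes, and
+// file_example_proto_init via the helpers below.
+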
+func rawDescVarName(f *fileInfo) string {
+ return fileVarName(f.File, "rawDesc")
+}
+func goTypesVarName(f *fileInfo) string {
+ return fileVarName(f.File, "goTypes")
+}
+func depIdxsVarName(f *fileInfo) string {
+ return fileVarName(f.File, "depIdxs")
+}
+func enumTypesVarName(f *fileInfo) string {
+ return fileVarName(f.File, "enumTypes")
+}
+func messageTypesVarName(f *fileInfo) string {
+ return fileVarName(f.File, "msgTypes")
+}
+func extensionTypesVarName(f *fileInfo) string {
+ return fileVarName(f.File, "extTypes")
+}
+func initFuncName(f *protogen.File) string {
+ return fileVarName(f, "init")
+}
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go
new file mode 100644
index 00000000..47c4fa18
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go
@@ -0,0 +1,1079 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal_gengo
+
+import (
+ "strings"
+
+ "google.golang.org/protobuf/compiler/protogen"
+ "google.golang.org/protobuf/internal/genid"
+)
+
+// Specialized support for well-known types is hard-coded into the generator
+// as opposed to being injected in adjacent .go sources in the generated package
+// in order to support specialized build systems like Bazel that always generate
+// dynamically from the source .proto files.
+
+func genPackageKnownComment(f *fileInfo) protogen.Comments {
+ switch f.Desc.Path() {
+ case genid.File_google_protobuf_any_proto:
+ return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `.
+
+ The Any message is a dynamic representation of any other message value.
+ It is functionally a tuple of the full name of the remote message type and
+ the serialized bytes of the remote message value.
+
+
+ Constructing an Any
+
+ An Any message containing another message value is constructed using New:
+
+ any, err := anypb.New(m)
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of any
+
+
+ Unmarshaling an Any
+
+ With a populated Any message, the underlying message can be serialized into
+ a remote concrete message value in a few ways.
+
+ If the exact concrete type is known, then a new (or pre-existing) instance
+ of that message can be passed to the UnmarshalTo method:
+
+ m := new(foopb.MyMessage)
+ if err := any.UnmarshalTo(m); err != nil {
+ ... // handle error
+ }
+ ... // make use of m
+
+ If the exact concrete type is not known, then the UnmarshalNew method can be
+ used to unmarshal the contents into a new instance of the remote message type:
+
+ m, err := any.UnmarshalNew()
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of m
+
+ UnmarshalNew uses the global type registry to resolve the message type and
+ construct a new instance of that message to unmarshal into. In order for a
+ message type to appear in the global registry, the Go type representing that
+ protobuf message type must be linked into the Go binary. For messages
+ generated by protoc-gen-go, this is achieved through an import of the
+ generated Go package representing a .proto file.
+
+ A common pattern with UnmarshalNew is to use a type switch with the resulting
+ proto.Message value:
+
+ switch m := m.(type) {
+ case *foopb.MyMessage:
+ ... // make use of m as a *foopb.MyMessage
+ case *barpb.OtherMessage:
+ ... // make use of m as a *barpb.OtherMessage
+ case *bazpb.SomeMessage:
+ ... // make use of m as a *bazpb.SomeMessage
+ }
+
+ This pattern ensures that the generated packages containing the message types
+ listed in the case clauses are linked into the Go binary and therefore also
+ registered in the global registry.
+
+
+ Type checking an Any
+
+ In order to type check whether an Any message represents some other message,
+ use the MessageIs method:
+
+ if any.MessageIs((*foopb.MyMessage)(nil)) {
+ ... // make use of any, knowing that it contains a foopb.MyMessage
+ }
+
+ The MessageIs method can also be used with an allocated instance of the target
+ message type if the intention is to unmarshal into it if the type matches:
+
+ m := new(foopb.MyMessage)
+ if any.MessageIs(m) {
+ if err := any.UnmarshalTo(m); err != nil {
+ ... // handle error
+ }
+ ... // make use of m
+ }
+
+`
+ case genid.File_google_protobuf_timestamp_proto:
+ return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `.
+
+ The Timestamp message represents a timestamp,
+ an instant in time since the Unix epoch (January 1st, 1970).
+
+
+ Conversion to a Go Time
+
+ The AsTime method can be used to convert a Timestamp message to a
+ standard Go time.Time value in UTC:
+
+ t := ts.AsTime()
+ ... // make use of t as a time.Time
+
+ Converting to a time.Time is a common operation so that the extensive
+ set of time-based operations provided by the time package can be leveraged.
+ See https://golang.org/pkg/time for more information.
+
+ The AsTime method performs the conversion on a best-effort basis. Timestamps
+ with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive)
+ are normalized during the conversion to a time.Time. To manually check for
+ invalid Timestamps per the documented limitations in timestamp.proto,
+ additionally call the CheckValid method:
+
+ if err := ts.CheckValid(); err != nil {
+ ... // handle error
+ }
+
+
+ Conversion from a Go Time
+
+ The timestamppb.New function can be used to construct a Timestamp message
+ from a standard Go time.Time value:
+
+ ts := timestamppb.New(t)
+ ... // make use of ts as a *timestamppb.Timestamp
+
+ In order to construct a Timestamp representing the current time, use Now:
+
+ ts := timestamppb.Now()
+ ... // make use of ts as a *timestamppb.Timestamp
+
+`
+ case genid.File_google_protobuf_duration_proto:
+ return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `.
+
+ The Duration message represents a signed span of time.
+
+
+ Conversion to a Go Duration
+
+ The AsDuration method can be used to convert a Duration message to a
+ standard Go time.Duration value:
+
+ d := dur.AsDuration()
+ ... // make use of d as a time.Duration
+
+ Converting to a time.Duration is a common operation so that the extensive
+ set of time-based operations provided by the time package can be leveraged.
+ See https://golang.org/pkg/time for more information.
+
+ The AsDuration method performs the conversion on a best-effort basis.
+ Durations with denormal values (e.g., nanoseconds beyond -99999999 and
+ +99999999, inclusive; or seconds and nanoseconds with opposite signs)
+ are normalized during the conversion to a time.Duration. To manually check for
+ invalid Duration per the documented limitations in duration.proto,
+ additionally call the CheckValid method:
+
+ if err := dur.CheckValid(); err != nil {
+ ... // handle error
+ }
+
+ Note that the documented limitations in duration.proto do not protect a
+ Duration from overflowing the representable range of a time.Duration in Go.
+ The AsDuration method uses saturation arithmetic such that an overflow clamps
+ the resulting value to the closest representable value (e.g., math.MaxInt64
+ for positive overflow and math.MinInt64 for negative overflow).
+
+
+ Conversion from a Go Duration
+
+ The durationpb.New function can be used to construct a Duration message
+ from a standard Go time.Duration value:
+
+ dur := durationpb.New(d)
+ ... // make use of d as a *durationpb.Duration
+
+`
+ case genid.File_google_protobuf_struct_proto:
+ return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `.
+
+ The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are
+ used to represent arbitrary JSON. The Value message represents a JSON value,
+ the Struct message represents a JSON object, and the ListValue message
+ represents a JSON array. See https://json.org for more information.
+
+ The Value, Struct, and ListValue types have generated MarshalJSON and
+ UnmarshalJSON methods such that they serialize JSON equivalent to what the
+ messages themselves represent. Use of these types with the
+ "google.golang.org/protobuf/encoding/protojson" package
+ ensures that they will be serialized as their JSON equivalent.
+
+ # Conversion to and from a Go interface
+
+ The standard Go "encoding/json" package has functionality to serialize
+ arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
+ ListValue.AsSlice methods can convert the protobuf message representation into
+ a form represented by interface{}, map[string]interface{}, and []interface{}.
+ This form can be used with other packages that operate on such data structures
+ and also directly with the standard json package.
+
+ In order to convert the interface{}, map[string]interface{}, and []interface{}
+ forms back as Value, Struct, and ListValue messages, use the NewStruct,
+ NewList, and NewValue constructor functions.
+
+ # Example usage
+
+ Consider the following example JSON object:
+
+ {
+ "firstName": "John",
+ "lastName": "Smith",
+ "isAlive": true,
+ "age": 27,
+ "address": {
+ "streetAddress": "21 2nd Street",
+ "city": "New York",
+ "state": "NY",
+ "postalCode": "10021-3100"
+ },
+ "phoneNumbers": [
+ {
+ "type": "home",
+ "number": "212 555-1234"
+ },
+ {
+ "type": "office",
+ "number": "646 555-4567"
+ }
+ ],
+ "children": [],
+ "spouse": null
+ }
+
+ To construct a Value message representing the above JSON object:
+
+ m, err := structpb.NewValue(map[string]interface{}{
+ "firstName": "John",
+ "lastName": "Smith",
+ "isAlive": true,
+ "age": 27,
+ "address": map[string]interface{}{
+ "streetAddress": "21 2nd Street",
+ "city": "New York",
+ "state": "NY",
+ "postalCode": "10021-3100",
+ },
+ "phoneNumbers": []interface{}{
+ map[string]interface{}{
+ "type": "home",
+ "number": "212 555-1234",
+ },
+ map[string]interface{}{
+ "type": "office",
+ "number": "646 555-4567",
+ },
+ },
+ "children": []interface{}{},
+ "spouse": nil,
+ })
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of m as a *structpb.Value
+`
+ case genid.File_google_protobuf_field_mask_proto:
+ return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `.
+
+ The FieldMask message represents a set of symbolic field paths.
+ The paths are specific to some target message type,
+ which is not stored within the FieldMask message itself.
+
+
+ Constructing a FieldMask
+
+ The New function is used to construct a FieldMask:
+
+ var messageType *descriptorpb.DescriptorProto
+ fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+ if err != nil {
+ ... // handle error
+ }
+ ... // make use of fm
+
+ The "field.name" and "field.number" paths are valid paths according to the
+ google.protobuf.DescriptorProto message. Use of a path that does not correlate
+ to valid fields reachable from DescriptorProto would result in an error.
+
+ Once a FieldMask message has been constructed,
+ the Append method can be used to insert additional paths to the path set:
+
+ var messageType *descriptorpb.DescriptorProto
+ if err := fm.Append(messageType, "options"); err != nil {
+ ... // handle error
+ }
+
+
+ Type checking a FieldMask
+
+ In order to verify that a FieldMask represents a set of fields that are
+ reachable from some target message type, use the IsValid method:
+
+ var messageType *descriptorpb.DescriptorProto
+ if fm.IsValid(messageType) {
+ ... // make use of fm
+ }
+
+ IsValid needs to be passed the target message type as an input since the
+ FieldMask message itself does not store the message type that the set of paths
+ are for.
+`
+ default:
+ return ""
+ }
+}
+
+func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) {
+ switch m.Desc.FullName() {
+ case genid.Any_message_fullname:
+ g.P("// New marshals src into a new Any instance.")
+ g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {")
+ g.P(" dst := new(Any)")
+ g.P(" if err := dst.MarshalFrom(src); err != nil {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" return dst, nil")
+ g.P("}")
+ g.P()
+
+ g.P("// MarshalFrom marshals src into dst as the underlying message")
+ g.P("// using the provided marshal options.")
+ g.P("//")
+ g.P("// If no options are specified, call dst.MarshalFrom instead.")
+ g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {")
+ g.P(" const urlPrefix = \"type.googleapis.com/\"")
+ g.P(" if src == nil {")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")")
+ g.P(" }")
+ g.P(" b, err := opts.Marshal(src)")
+ g.P(" if err != nil {")
+ g.P(" return err")
+ g.P(" }")
+ g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())")
+ g.P(" dst.Value = b")
+ g.P(" return nil")
+ g.P("}")
+ g.P()
+
+ g.P("// UnmarshalTo unmarshals the underlying message from src into dst")
+ g.P("// using the provided unmarshal options.")
+ g.P("// It reports an error if dst is not of the right message type.")
+ g.P("//")
+ g.P("// If no options are specified, call src.UnmarshalTo instead.")
+ g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {")
+ g.P(" if src == nil {")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")")
+ g.P(" }")
+ g.P(" if !src.MessageIs(dst) {")
+ g.P(" got := dst.ProtoReflect().Descriptor().FullName()")
+ g.P(" want := src.MessageName()")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)")
+ g.P(" }")
+ g.P(" return opts.Unmarshal(src.GetValue(), dst)")
+ g.P("}")
+ g.P()
+
+ g.P("// UnmarshalNew unmarshals the underlying message from src into dst,")
+ g.P("// which is newly created message using a type resolved from the type URL.")
+ g.P("// The message type is resolved according to opt.Resolver,")
+ g.P("// which should implement protoregistry.MessageTypeResolver.")
+ g.P("// It reports an error if the underlying message type could not be resolved.")
+ g.P("//")
+ g.P("// If no options are specified, call src.UnmarshalNew instead.")
+ g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {")
+ g.P(" if src.GetTypeUrl() == \"\" {")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")")
+ g.P(" }")
+ g.P(" if opts.Resolver == nil {")
+ g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes"))
+ g.P(" }")
+ g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")")
+ g.P(" if !ok {")
+ g.P(" return nil, ", protoregistryPackage.Ident("NotFound"))
+ g.P(" }")
+ g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())")
+ g.P(" if err != nil {")
+ g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)")
+ g.P(" }")
+ g.P(" dst = mt.New().Interface()")
+ g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)")
+ g.P("}")
+ g.P()
+
+ g.P("// MessageIs reports whether the underlying message is of the same type as m.")
+ g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {")
+ g.P(" if m == nil {")
+ g.P(" return false")
+ g.P(" }")
+ g.P(" url := x.GetTypeUrl()")
+ g.P(" name := string(m.ProtoReflect().Descriptor().FullName())")
+ g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {")
+ g.P(" return false")
+ g.P(" }")
+ g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'")
+ g.P("}")
+ g.P()
+
+ g.P("// MessageName reports the full name of the underlying message,")
+ g.P("// returning an empty string if invalid.")
+ g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {")
+ g.P(" url := x.GetTypeUrl()")
+ g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)")
+ g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {")
+ g.P(" name = name[i+len(\"/\"):]")
+ g.P(" }")
+ g.P(" if !name.IsValid() {")
+ g.P(" return \"\"")
+ g.P(" }")
+ g.P(" return name")
+ g.P("}")
+ g.P()
+
+ g.P("// MarshalFrom marshals m into x as the underlying message.")
+ g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {")
+ g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})")
+ g.P("}")
+ g.P()
+
+ g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.")
+ g.P("// It resets m before performing the unmarshal operation.")
+ g.P("// It reports an error if m is not of the right message type.")
+ g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {")
+ g.P(" return UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})")
+ g.P("}")
+ g.P()
+
+ g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into")
+ g.P("// a newly allocated message of the specified type.")
+ g.P("// It reports an error if the underlying message type could not be resolved.")
+ g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {")
+ g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})")
+ g.P("}")
+ g.P()
+
+ case genid.Timestamp_message_fullname:
+ g.P("// Now constructs a new Timestamp from the current time.")
+ g.P("func Now() *Timestamp {")
+ g.P(" return New(", timePackage.Ident("Now"), "())")
+ g.P("}")
+ g.P()
+
+ g.P("// New constructs a new Timestamp from the provided time.Time.")
+ g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {")
+ g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}")
+ g.P("}")
+ g.P()
+
+ g.P("// AsTime converts x to a time.Time.")
+ g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {")
+ g.P(" return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()")
+ g.P("}")
+ g.P()
+
+ g.P("// IsValid reports whether the timestamp is valid.")
+ g.P("// It is equivalent to CheckValid == nil.")
+ g.P("func (x *Timestamp) IsValid() bool {")
+ g.P(" return x.check() == 0")
+ g.P("}")
+ g.P()
+
+ g.P("// CheckValid returns an error if the timestamp is invalid.")
+ g.P("// In particular, it checks whether the value represents a date that is")
+ g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.")
+ g.P("// An error is reported for a nil Timestamp.")
+ g.P("func (x *Timestamp) CheckValid() error {")
+ g.P(" switch x.check() {")
+ g.P(" case invalidNil:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")")
+ g.P(" case invalidUnderflow:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) before 0001-01-01\", x)")
+ g.P(" case invalidOverflow:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)")
+ g.P(" case invalidNanos:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)")
+ g.P(" default:")
+ g.P(" return nil")
+ g.P(" }")
+ g.P("}")
+ g.P()
+
+ g.P("const (")
+ g.P(" _ = iota")
+ g.P(" invalidNil")
+ g.P(" invalidUnderflow")
+ g.P(" invalidOverflow")
+ g.P(" invalidNanos")
+ g.P(")")
+ g.P()
+
+ g.P("func (x *Timestamp) check() uint {")
+ g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive")
+ g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive")
+ g.P(" secs := x.GetSeconds()")
+ g.P(" nanos := x.GetNanos()")
+ g.P(" switch {")
+ g.P(" case x == nil:")
+ g.P(" return invalidNil")
+ g.P(" case secs < minTimestamp:")
+ g.P(" return invalidUnderflow")
+ g.P(" case secs > maxTimestamp:")
+ g.P(" return invalidOverflow")
+ g.P(" case nanos < 0 || nanos >= 1e9:")
+ g.P(" return invalidNanos")
+ g.P(" default:")
+ g.P(" return 0")
+ g.P(" }")
+ g.P("}")
+ g.P()
+
+ case genid.Duration_message_fullname:
+ g.P("// New constructs a new Duration from the provided time.Duration.")
+ g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {")
+ g.P(" nanos := d.Nanoseconds()")
+ g.P(" secs := nanos / 1e9")
+ g.P(" nanos -= secs * 1e9")
+ g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}")
+ g.P("}")
+ g.P()
+
+ g.P("// AsDuration converts x to a time.Duration,")
+ g.P("// returning the closest duration value in the event of overflow.")
+ g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {")
+ g.P(" secs := x.GetSeconds()")
+ g.P(" nanos := x.GetNanos()")
+ g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second"))
+ g.P(" overflow := d/", timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)")
+ g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond"))
+ g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)")
+ g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)")
+ g.P(" if overflow {")
+ g.P(" switch {")
+ g.P(" case secs < 0:")
+ g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")")
+ g.P(" case secs > 0:")
+ g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return d")
+ g.P("}")
+ g.P()
+
+ g.P("// IsValid reports whether the duration is valid.")
+ g.P("// It is equivalent to CheckValid == nil.")
+ g.P("func (x *Duration) IsValid() bool {")
+ g.P(" return x.check() == 0")
+ g.P("}")
+ g.P()
+
+ g.P("// CheckValid returns an error if the duration is invalid.")
+ g.P("// In particular, it checks whether the value is within the range of")
+ g.P("// -10000 years to +10000 years inclusive.")
+ g.P("// An error is reported for a nil Duration.")
+ g.P("func (x *Duration) CheckValid() error {")
+ g.P(" switch x.check() {")
+ g.P(" case invalidNil:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")")
+ g.P(" case invalidUnderflow:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds -10000 years\", x)")
+ g.P(" case invalidOverflow:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)")
+ g.P(" case invalidNanosRange:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)")
+ g.P(" case invalidNanosSign:")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)")
+ g.P(" default:")
+ g.P(" return nil")
+ g.P(" }")
+ g.P("}")
+ g.P()
+
+ g.P("const (")
+ g.P(" _ = iota")
+ g.P(" invalidNil")
+ g.P(" invalidUnderflow")
+ g.P(" invalidOverflow")
+ g.P(" invalidNanosRange")
+ g.P(" invalidNanosSign")
+ g.P(")")
+ g.P()
+
+ g.P("func (x *Duration) check() uint {")
+ g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min")
+ g.P(" secs := x.GetSeconds()")
+ g.P(" nanos := x.GetNanos()")
+ g.P(" switch {")
+ g.P(" case x == nil:")
+ g.P(" return invalidNil")
+ g.P(" case secs < -absDuration:")
+ g.P(" return invalidUnderflow")
+ g.P(" case secs > +absDuration:")
+ g.P(" return invalidOverflow")
+ g.P(" case nanos <= -1e9 || nanos >= +1e9:")
+ g.P(" return invalidNanosRange")
+ g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):")
+ g.P(" return invalidNanosSign")
+ g.P(" default:")
+ g.P(" return 0")
+ g.P(" }")
+ g.P("}")
+ g.P()
+
+ case genid.Struct_message_fullname:
+ g.P("// NewStruct constructs a Struct from a general-purpose Go map.")
+ g.P("// The map keys must be valid UTF-8.")
+ g.P("// The map values are converted using NewValue.")
+ g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {")
+ g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}")
+ g.P(" for k, v := range v {")
+ g.P(" if !", utf8Package.Ident("ValidString"), "(k) {")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)")
+ g.P(" }")
+ g.P(" var err error")
+ g.P(" x.Fields[k], err = NewValue(v)")
+ g.P(" if err != nil {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return x, nil")
+ g.P("}")
+ g.P()
+
+ g.P("// AsMap converts x to a general-purpose Go map.")
+ g.P("// The map values are converted by calling Value.AsInterface.")
+ g.P("func (x *Struct) AsMap() map[string]interface{} {")
+ g.P(" f := x.GetFields()")
+ g.P(" vs := make(map[string]interface{}, len(f))")
+ g.P(" for k, v := range f {")
+ g.P(" vs[k] = v.AsInterface()")
+ g.P(" }")
+ g.P(" return vs")
+ g.P("}")
+ g.P()
+
+ g.P("func (x *Struct) MarshalJSON() ([]byte, error) {")
+ g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)")
+ g.P("}")
+ g.P()
+
+ g.P("func (x *Struct) UnmarshalJSON(b []byte) error {")
+ g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)")
+ g.P("}")
+ g.P()
+
+ case genid.ListValue_message_fullname:
+ g.P("// NewList constructs a ListValue from a general-purpose Go slice.")
+ g.P("// The slice elements are converted using NewValue.")
+ g.P("func NewList(v []interface{}) (*ListValue, error) {")
+ g.P(" x := &ListValue{Values: make([]*Value, len(v))}")
+ g.P(" for i, v := range v {")
+ g.P(" var err error")
+ g.P(" x.Values[i], err = NewValue(v)")
+ g.P(" if err != nil {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return x, nil")
+ g.P("}")
+ g.P()
+
+ g.P("// AsSlice converts x to a general-purpose Go slice.")
+ g.P("// The slice elements are converted by calling Value.AsInterface.")
+ g.P("func (x *ListValue) AsSlice() []interface{} {")
+ g.P(" vals := x.GetValues()")
+ g.P(" vs := make([]interface{}, len(vals))")
+ g.P(" for i, v := range vals {")
+ g.P(" vs[i] = v.AsInterface()")
+ g.P(" }")
+ g.P(" return vs")
+ g.P("}")
+ g.P()
+
+ g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {")
+ g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)")
+ g.P("}")
+ g.P()
+
+ g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {")
+ g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)")
+ g.P("}")
+ g.P()
+
+ case genid.Value_message_fullname:
+ g.P("// NewValue constructs a Value from a general-purpose Go interface.")
+ g.P("//")
+ g.P("// ╔════════════════════════╤════════════════════════════════════════════╗")
+ g.P("// ║ Go type │ Conversion ║")
+ g.P("// ╠════════════════════════╪════════════════════════════════════════════╣")
+ g.P("// ║ nil │ stored as NullValue ║")
+ g.P("// ║ bool │ stored as BoolValue ║")
+ g.P("// ║ int, int32, int64 │ stored as NumberValue ║")
+ g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║")
+ g.P("// ║ float32, float64 │ stored as NumberValue ║")
+ g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║")
+ g.P("// ║ []byte │ stored as StringValue; base64-encoded ║")
+ g.P("// ║ map[string]interface{} │ stored as StructValue ║")
+ g.P("// ║ []interface{} │ stored as ListValue ║")
+ g.P("// ╚════════════════════════╧════════════════════════════════════════════╝")
+ g.P("//")
+ g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss")
+ g.P("// is possible since they are stored as a float64.")
+ g.P("func NewValue(v interface{}) (*Value, error) {")
+ g.P(" switch v := v.(type) {")
+ g.P(" case nil:")
+ g.P(" return NewNullValue(), nil")
+ g.P(" case bool:")
+ g.P(" return NewBoolValue(v), nil")
+ g.P(" case int:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case int32:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case int64:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case uint:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case uint32:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case uint64:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case float32:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case float64:")
+ g.P(" return NewNumberValue(float64(v)), nil")
+ g.P(" case string:")
+ g.P(" if !", utf8Package.Ident("ValidString"), "(v) {")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)")
+ g.P(" }")
+ g.P(" return NewStringValue(v), nil")
+ g.P(" case []byte:")
+ g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)")
+ g.P(" return NewStringValue(s), nil")
+ g.P(" case map[string]interface{}:")
+ g.P(" v2, err := NewStruct(v)")
+ g.P(" if err != nil {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" return NewStructValue(v2), nil")
+ g.P(" case []interface{}:")
+ g.P(" v2, err := NewList(v)")
+ g.P(" if err != nil {")
+ g.P(" return nil, err")
+ g.P(" }")
+ g.P(" return NewListValue(v2), nil")
+ g.P(" default:")
+ g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)")
+ g.P(" }")
+ g.P("}")
+ g.P()
+
+ g.P("// NewNullValue constructs a new null Value.")
+ g.P("func NewNullValue() *Value {")
+ g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}")
+ g.P("}")
+ g.P()
+
+ g.P("// NewBoolValue constructs a new boolean Value.")
+ g.P("func NewBoolValue(v bool) *Value {")
+ g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}")
+ g.P("}")
+ g.P()
+
+ g.P("// NewNumberValue constructs a new number Value.")
+ g.P("func NewNumberValue(v float64) *Value {")
+ g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}")
+ g.P("}")
+ g.P()
+
+ g.P("// NewStringValue constructs a new string Value.")
+ g.P("func NewStringValue(v string) *Value {")
+ g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}")
+ g.P("}")
+ g.P()
+
+ g.P("// NewStructValue constructs a new struct Value.")
+ g.P("func NewStructValue(v *Struct) *Value {")
+ g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}")
+ g.P("}")
+ g.P()
+
+ g.P("// NewListValue constructs a new list Value.")
+ g.P("func NewListValue(v *ListValue) *Value {")
+ g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}")
+ g.P("}")
+ g.P()
+
+ g.P("// AsInterface converts x to a general-purpose Go interface.")
+ g.P("//")
+ g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce")
+ g.P("// semantically equivalent JSON (assuming no errors occur).")
+ g.P("//")
+ g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are")
+ g.P("// converted as strings to remain compatible with MarshalJSON.")
+ g.P("func (x *Value) AsInterface() interface{} {")
+ g.P(" switch v := x.GetKind().(type) {")
+ g.P(" case *Value_NumberValue:")
+ g.P(" if v != nil {")
+ g.P(" switch {")
+ g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):")
+ g.P(" return \"NaN\"")
+ g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, +1):")
+ g.P(" return \"Infinity\"")
+ g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):")
+ g.P(" return \"-Infinity\"")
+ g.P(" default:")
+ g.P(" return v.NumberValue")
+ g.P(" }")
+ g.P(" }")
+ g.P(" case *Value_StringValue:")
+ g.P(" if v != nil {")
+ g.P(" return v.StringValue")
+ g.P(" }")
+ g.P(" case *Value_BoolValue:")
+ g.P(" if v != nil {")
+ g.P(" return v.BoolValue")
+ g.P(" }")
+ g.P(" case *Value_StructValue:")
+ g.P(" if v != nil {")
+ g.P(" return v.StructValue.AsMap()")
+ g.P(" }")
+ g.P(" case *Value_ListValue:")
+ g.P(" if v != nil {")
+ g.P(" return v.ListValue.AsSlice()")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return nil")
+ g.P("}")
+ g.P()
+
+ g.P("func (x *Value) MarshalJSON() ([]byte, error) {")
+ g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)")
+ g.P("}")
+ g.P()
+
+ g.P("func (x *Value) UnmarshalJSON(b []byte) error {")
+ g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)")
+ g.P("}")
+ g.P()
+
+ case genid.FieldMask_message_fullname:
+ g.P("// New constructs a field mask from a list of paths and verifies that")
+ g.P("// each one is valid according to the specified message type.")
+ g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {")
+ g.P(" x := new(FieldMask)")
+ g.P(" return x, x.Append(m, paths...)")
+ g.P("}")
+ g.P()
+
+ g.P("// Union returns the union of all the paths in the input field masks.")
+ g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {")
+ g.P(" var out []string")
+ g.P(" out = append(out, mx.GetPaths()...)")
+ g.P(" out = append(out, my.GetPaths()...)")
+ g.P(" for _, m := range ms {")
+ g.P(" out = append(out, m.GetPaths()...)")
+ g.P(" }")
+ g.P(" return &FieldMask{Paths: normalizePaths(out)}")
+ g.P("}")
+ g.P()
+
+ g.P("// Intersect returns the intersection of all the paths in the input field masks.")
+ g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {")
+ g.P(" var ss1, ss2 []string // reused buffers for performance")
+ g.P(" intersect := func(out, in []string) []string {")
+ g.P(" ss1 = normalizePaths(append(ss1[:0], in...))")
+ g.P(" ss2 = normalizePaths(append(ss2[:0], out...))")
+ g.P(" out = out[:0]")
+ g.P(" for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {")
+ g.P(" switch s1, s2 := ss1[i1], ss2[i2]; {")
+ g.P(" case hasPathPrefix(s1, s2):")
+ g.P(" out = append(out, s1)")
+ g.P(" i1++")
+ g.P(" case hasPathPrefix(s2, s1):")
+ g.P(" out = append(out, s2)")
+ g.P(" i2++")
+ g.P(" case lessPath(s1, s2):")
+ g.P(" i1++")
+ g.P(" case lessPath(s2, s1):")
+ g.P(" i2++")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return out")
+ g.P(" }")
+ g.P()
+ g.P(" out := Union(mx, my, ms...).GetPaths()")
+ g.P(" out = intersect(out, mx.GetPaths())")
+ g.P(" out = intersect(out, my.GetPaths())")
+ g.P(" for _, m := range ms {")
+ g.P(" out = intersect(out, m.GetPaths())")
+ g.P(" }")
+ g.P(" return &FieldMask{Paths: normalizePaths(out)}")
+ g.P("}")
+ g.P()
+
+ g.P("// IsValid reports whether all the paths are syntactically valid and")
+ g.P("// refer to known fields in the specified message type.")
+ g.P("// It reports false for a nil FieldMask.")
+ g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {")
+ g.P(" paths := x.GetPaths()")
+ g.P(" return x != nil && numValidPaths(m, paths) == len(paths)")
+ g.P("}")
+ g.P()
+
+ g.P("// Append appends a list of paths to the mask and verifies that each one")
+ g.P("// is valid according to the specified message type.")
+ g.P("// An invalid path is not appended and breaks insertion of subsequent paths.")
+ g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {")
+ g.P(" numValid := numValidPaths(m, paths)")
+ g.P(" x.Paths = append(x.Paths, paths[:numValid]...)")
+ g.P(" paths = paths[numValid:]")
+ g.P(" if len(paths) > 0 {")
+ g.P(" name := m.ProtoReflect().Descriptor().FullName()")
+ g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid path %q for message %q\", paths[0], name)")
+ g.P(" }")
+ g.P(" return nil")
+ g.P("}")
+ g.P()
+
+ g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {")
+ g.P(" md0 := m.ProtoReflect().Descriptor()")
+ g.P(" for i, path := range paths {")
+ g.P(" md := md0")
+ g.P(" if !rangeFields(path, func(field string) bool {")
+ g.P(" // Search the field within the message.")
+ g.P(" if md == nil {")
+ g.P(" return false // not within a message")
+ g.P(" }")
+ g.P(" fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))")
+ g.P(" // The real field name of a group is the message name.")
+ g.P(" if fd == nil {")
+ g.P(" gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))")
+ g.P(" if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(gd.Message().Name()) == field {")
+ g.P(" fd = gd")
+ g.P(" }")
+ g.P(" } else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {")
+ g.P(" fd = nil")
+ g.P(" }")
+ g.P(" if fd == nil {")
+ g.P(" return false // message has does not have this field")
+ g.P(" }")
+ g.P()
+ g.P(" // Identify the next message to search within.")
+ g.P(" md = fd.Message() // may be nil")
+ g.P()
+ g.P(" // Repeated fields are only allowed at the last position.")
+ g.P(" if fd.IsList() || fd.IsMap() {")
+ g.P(" md = nil")
+ g.P(" }")
+ g.P()
+ g.P(" return true")
+ g.P(" }) {")
+ g.P(" return i")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return len(paths)")
+ g.P("}")
+ g.P()
+
+ g.P("// Normalize converts the mask to its canonical form where all paths are sorted")
+ g.P("// and redundant paths are removed.")
+ g.P("func (x *FieldMask) Normalize() {")
+ g.P(" x.Paths = normalizePaths(x.Paths)")
+ g.P("}")
+ g.P()
+ g.P("func normalizePaths(paths []string) []string {")
+ g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {")
+ g.P(" return lessPath(paths[i], paths[j])")
+ g.P(" })")
+ g.P()
+ g.P(" // Elide any path that is a prefix match on the previous.")
+ g.P(" out := paths[:0]")
+ g.P(" for _, path := range paths {")
+ g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {")
+ g.P(" continue")
+ g.P(" }")
+ g.P(" out = append(out, path)")
+ g.P(" }")
+ g.P(" return out")
+ g.P("}")
+ g.P()
+
+ g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either")
+ g.P("// an exact matche or that the prefix is delimited by a dot.")
+ g.P("func hasPathPrefix(path, prefix string) bool {")
+ g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')")
+ g.P("}")
+ g.P()
+
+ g.P("// lessPath is a lexicographical comparison where dot is specially treated")
+ g.P("// as the smallest symbol.")
+ g.P("func lessPath(x, y string) bool {")
+ g.P(" for i := 0; i < len(x) && i < len(y); i++ {")
+ g.P(" if x[i] != y[i] {")
+ g.P(" return (x[i] - '.') < (y[i] - '.')")
+ g.P(" }")
+ g.P(" }")
+ g.P(" return len(x) < len(y)")
+ g.P("}")
+ g.P()
+
+ g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by")
+ g.P("// iterating over each field in place and calling a iterator function.")
+ g.P("func rangeFields(path string, f func(field string) bool) bool {")
+ g.P(" for {")
+ g.P(" var field string")
+ g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {")
+ g.P(" field, path = path[:i], path[i:]")
+ g.P(" } else {")
+ g.P(" field, path = path, \"\"")
+ g.P(" }")
+ g.P()
+ g.P(" if !f(field) {")
+ g.P(" return false")
+ g.P(" }")
+ g.P()
+ g.P(" if len(path) == 0 {")
+ g.P(" return true")
+ g.P(" }")
+ g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")")
+ g.P(" }")
+ g.P("}")
+ g.P()
+
+ case genid.BoolValue_message_fullname,
+ genid.Int32Value_message_fullname,
+ genid.Int64Value_message_fullname,
+ genid.UInt32Value_message_fullname,
+ genid.UInt64Value_message_fullname,
+ genid.FloatValue_message_fullname,
+ genid.DoubleValue_message_fullname,
+ genid.StringValue_message_fullname,
+ genid.BytesValue_message_fullname:
+ funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value")
+ typeName := strings.ToLower(funcName)
+ switch typeName {
+ case "float":
+ typeName = "float32"
+ case "double":
+ typeName = "float64"
+ case "bytes":
+ typeName = "[]byte"
+ }
+
+ g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.")
+ g.P("func ", funcName, "(v ", typeName, ") *", m.GoIdent, " {")
+ g.P(" return &", m.GoIdent, "{Value: v}")
+ g.P("}")
+ g.P()
+ }
+}
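
The wrapper constructors and conversion methods generated above become the public surface of the well-known-type packages (for example google.golang.org/protobuf/types/known/structpb). As a minimal sketch of how that generated API is typically consumed downstream (not part of this diff):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
    	// NewValue mirrors the generated type switch above: numbers, strings,
    	// maps, and slices are wrapped into the matching Value kind.
    	v, err := structpb.NewValue(map[string]interface{}{
    		"name":  "example",
    		"count": 3,
    		"tags":  []interface{}{"a", "b"},
    	})
    	if err != nil {
    		panic(err)
    	}

    	// MarshalJSON delegates to protojson, as generated above.
    	b, _ := v.MarshalJSON()
    	fmt.Println(string(b))

    	// AsInterface converts back to a plain Go value.
    	fmt.Printf("%#v\n", v.AsInterface())
    }
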
diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go
new file mode 100644
index 00000000..e67236d8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/main.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The protoc-gen-go binary is a protoc plugin to generate Go code for
+// both proto2 and proto3 versions of the protocol buffer language.
+//
+// For more information about the usage of this plugin, see:
+// https://protobuf.dev/reference/go/go-generated.
+package main
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo"
+ "google.golang.org/protobuf/compiler/protogen"
+ "google.golang.org/protobuf/internal/version"
+)
+
+const genGoDocURL = "https://protobuf.dev/reference/go/go-generated"
+const grpcDocURL = "https://grpc.io/docs/languages/go/quickstart/#regenerate-grpc-code"
+
+func main() {
+ if len(os.Args) == 2 && os.Args[1] == "--version" {
+ fmt.Fprintf(os.Stdout, "%v %v\n", filepath.Base(os.Args[0]), version.String())
+ os.Exit(0)
+ }
+ if len(os.Args) == 2 && os.Args[1] == "--help" {
+ fmt.Fprintf(os.Stdout, "See "+genGoDocURL+" for usage information.\n")
+ os.Exit(0)
+ }
+
+ var (
+ flags flag.FlagSet
+ plugins = flags.String("plugins", "", "deprecated option")
+ )
+ protogen.Options{
+ ParamFunc: flags.Set,
+ }.Run(func(gen *protogen.Plugin) error {
+ if *plugins != "" {
+ return errors.New("protoc-gen-go: plugins are not supported; use 'protoc --go-grpc_out=...' to generate gRPC\n\n" +
+ "See " + grpcDocURL + " for more information.")
+ }
+ for _, f := range gen.Files {
+ if f.Generate {
+ gengo.GenerateFile(gen, f)
+ }
+ }
+ gen.SupportedFeatures = gengo.SupportedFeatures
+ return nil
+ })
+}
diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go
new file mode 100644
index 00000000..2d2171e5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go
@@ -0,0 +1,1357 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protogen provides support for writing protoc plugins.
+//
+// Plugins for protoc, the Protocol Buffer compiler,
+// are programs which read a CodeGeneratorRequest message from standard input
+// and write a CodeGeneratorResponse message to standard output.
+// This package provides support for writing plugins which generate Go code.
+package protogen
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/strs"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+ "google.golang.org/protobuf/types/dynamicpb"
+ "google.golang.org/protobuf/types/pluginpb"
+)
+
+const goPackageDocURL = "https://protobuf.dev/reference/go/go-generated#package"
+
+// Run executes a function as a protoc plugin.
+//
+// It reads a CodeGeneratorRequest message from os.Stdin, invokes the plugin
+// function, and writes a CodeGeneratorResponse message to os.Stdout.
+//
+// If a failure occurs while reading or writing, Run prints an error to
+// os.Stderr and calls os.Exit(1).
+func (opts Options) Run(f func(*Plugin) error) {
+ if err := run(opts, f); err != nil {
+ fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), err)
+ os.Exit(1)
+ }
+}
+
+func run(opts Options, f func(*Plugin) error) error {
+ if len(os.Args) > 1 {
+ return fmt.Errorf("unknown argument %q (this program should be run by protoc, not directly)", os.Args[1])
+ }
+ in, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ return err
+ }
+ req := &pluginpb.CodeGeneratorRequest{}
+ if err := proto.Unmarshal(in, req); err != nil {
+ return err
+ }
+ gen, err := opts.New(req)
+ if err != nil {
+ return err
+ }
+ if err := f(gen); err != nil {
+ // Errors from the plugin function are reported by setting the
+ // error field in the CodeGeneratorResponse.
+ //
+ // In contrast, errors that indicate a problem in protoc
+ // itself (unparsable input, I/O errors, etc.) are reported
+ // to stderr.
+ gen.Error(err)
+ }
+ resp := gen.Response()
+ out, err := proto.Marshal(resp)
+ if err != nil {
+ return err
+ }
+ if _, err := os.Stdout.Write(out); err != nil {
+ return err
+ }
+ return nil
+}
+
+// A Plugin is a protoc plugin invocation.
+type Plugin struct {
+ // Request is the CodeGeneratorRequest provided by protoc.
+ Request *pluginpb.CodeGeneratorRequest
+
+ // Files is the set of files to generate and everything they import.
+ // Files appear in topological order, so each file appears before any
+ // file that imports it.
+ Files []*File
+ FilesByPath map[string]*File
+
+ // SupportedFeatures is the set of protobuf language features supported by
+ // this generator plugin. See the documentation for
+ // google.protobuf.CodeGeneratorResponse.supported_features for details.
+ SupportedFeatures uint64
+
+ fileReg *protoregistry.Files
+ enumsByName map[protoreflect.FullName]*Enum
+ messagesByName map[protoreflect.FullName]*Message
+ annotateCode bool
+ pathType pathType
+ module string
+ genFiles []*GeneratedFile
+ opts Options
+ err error
+}
+
+type Options struct {
+ // If ParamFunc is non-nil, it will be called with each unknown
+ // generator parameter.
+ //
+ // Plugins for protoc can accept parameters from the command line,
+ // passed in the --<lang>_out flag to protoc, separated from the output
+ // directory with a colon; e.g.,
+ //
+ //   --go_out=<param1>=<value1>,<param2>=<value2>:<output_directory>
+ //
+ // Parameters passed in this fashion as a comma-separated list of
+ // key=value pairs will be passed to the ParamFunc.
+ //
+ // The (flag.FlagSet).Set method matches this function signature,
+ // so parameters can be converted into flags as in the following:
+ //
+ // var flags flag.FlagSet
+ // value := flags.Bool("param", false, "")
+ // opts := &protogen.Options{
+ // ParamFunc: flags.Set,
+ // }
+ // opts.Run(func(p *protogen.Plugin) error {
+ // if *value { ... }
+ // })
+ ParamFunc func(name, value string) error
+
+ // ImportRewriteFunc is called with the import path of each package
+ // imported by a generated file. It returns the import path to use
+ // for this package.
+ ImportRewriteFunc func(GoImportPath) GoImportPath
+}
+
+// New returns a new Plugin.
+func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) {
+ gen := &Plugin{
+ Request: req,
+ FilesByPath: make(map[string]*File),
+ fileReg: new(protoregistry.Files),
+ enumsByName: make(map[protoreflect.FullName]*Enum),
+ messagesByName: make(map[protoreflect.FullName]*Message),
+ opts: opts,
+ }
+
+ packageNames := make(map[string]GoPackageName) // filename -> package name
+ importPaths := make(map[string]GoImportPath) // filename -> import path
+ for _, param := range strings.Split(req.GetParameter(), ",") {
+ var value string
+ if i := strings.Index(param, "="); i >= 0 {
+ value = param[i+1:]
+ param = param[0:i]
+ }
+ switch param {
+ case "":
+ // Ignore.
+ case "module":
+ gen.module = value
+ case "paths":
+ switch value {
+ case "import":
+ gen.pathType = pathTypeImport
+ case "source_relative":
+ gen.pathType = pathTypeSourceRelative
+ default:
+ return nil, fmt.Errorf(`unknown path type %q: want "import" or "source_relative"`, value)
+ }
+ case "annotate_code":
+ switch value {
+ case "true", "":
+ gen.annotateCode = true
+ case "false":
+ default:
+ return nil, fmt.Errorf(`bad value for parameter %q: want "true" or "false"`, param)
+ }
+ default:
+ if param[0] == 'M' {
+ impPath, pkgName := splitImportPathAndPackageName(value)
+ if pkgName != "" {
+ packageNames[param[1:]] = pkgName
+ }
+ if impPath != "" {
+ importPaths[param[1:]] = impPath
+ }
+ continue
+ }
+ if opts.ParamFunc != nil {
+ if err := opts.ParamFunc(param, value); err != nil {
+ return nil, err
+ }
+ }
+ }
+ }
+
+ // When the module= option is provided, we strip the module name
+ // prefix from generated files. This only makes sense if generated
+ // filenames are based on the import path.
+ if gen.module != "" && gen.pathType == pathTypeSourceRelative {
+ return nil, fmt.Errorf("cannot use module= with paths=source_relative")
+ }
+
+ // Figure out the import path and package name for each file.
+ //
+ // The rules here are complicated and have grown organically over time.
+ // Interactions between different ways of specifying package information
+ // may be surprising.
+ //
+ // The recommended approach is to include a go_package option in every
+ // .proto source file specifying the full import path of the Go package
+ // associated with this file.
+ //
+ // option go_package = "google.golang.org/protobuf/types/known/anypb";
+ //
+ // Alternatively, build systems which want to exert full control over
+ // import paths may specify M= flags.
+ for _, fdesc := range gen.Request.ProtoFile {
+ // The "M" command-line flags take precedence over
+ // the "go_package" option in the .proto source file.
+ filename := fdesc.GetName()
+ impPath, pkgName := splitImportPathAndPackageName(fdesc.GetOptions().GetGoPackage())
+ if importPaths[filename] == "" && impPath != "" {
+ importPaths[filename] = impPath
+ }
+ if packageNames[filename] == "" && pkgName != "" {
+ packageNames[filename] = pkgName
+ }
+ switch {
+ case importPaths[filename] == "":
+ // The import path must be specified one way or another.
+ return nil, fmt.Errorf(
+ "unable to determine Go import path for %q\n\n"+
+ "Please specify either:\n"+
+ "\t• a \"go_package\" option in the .proto source file, or\n"+
+ "\t• a \"M\" argument on the command line.\n\n"+
+ "See %v for more information.\n",
+ fdesc.GetName(), goPackageDocURL)
+ case !strings.Contains(string(importPaths[filename]), ".") &&
+ !strings.Contains(string(importPaths[filename]), "/"):
+ // Check that import paths contain at least a dot or slash to avoid
+ // a common mistake where import path is confused with package name.
+ return nil, fmt.Errorf(
+ "invalid Go import path %q for %q\n\n"+
+ "The import path must contain at least one period ('.') or forward slash ('/') character.\n\n"+
+ "See %v for more information.\n",
+ string(importPaths[filename]), fdesc.GetName(), goPackageDocURL)
+ case packageNames[filename] == "":
+ // If the package name is not explicitly specified,
+ // then derive a reasonable package name from the import path.
+ //
+ // NOTE: The package name is derived first from the import path in
+ // the "go_package" option (if present) before trying the "M" flag.
+ // The inverted order for this is because the primary use of the "M"
+ // flag is by build systems that have full control over the
+ // import paths of all packages, where it is generally expected that
+ // the Go package name still be identical for the Go toolchain and
+ // for custom build systems like Bazel.
+ if impPath == "" {
+ impPath = importPaths[filename]
+ }
+ packageNames[filename] = cleanPackageName(path.Base(string(impPath)))
+ }
+ }
+
+ // Consistency check: Every file with the same Go import path should have
+ // the same Go package name.
+ packageFiles := make(map[GoImportPath][]string)
+ for filename, importPath := range importPaths {
+ if _, ok := packageNames[filename]; !ok {
+ // Skip files mentioned in an M= parameter
+ // but which do not appear in the CodeGeneratorRequest.
+ continue
+ }
+ packageFiles[importPath] = append(packageFiles[importPath], filename)
+ }
+ for importPath, filenames := range packageFiles {
+ for i := 1; i < len(filenames); i++ {
+ if a, b := packageNames[filenames[0]], packageNames[filenames[i]]; a != b {
+ return nil, fmt.Errorf("Go package %v has inconsistent names %v (%v) and %v (%v)",
+ importPath, a, filenames[0], b, filenames[i])
+ }
+ }
+ }
+
+ // The extracted types from the full import set
+ typeRegistry := newExtensionRegistry()
+ for _, fdesc := range gen.Request.ProtoFile {
+ filename := fdesc.GetName()
+ if gen.FilesByPath[filename] != nil {
+ return nil, fmt.Errorf("duplicate file name: %q", filename)
+ }
+ f, err := newFile(gen, fdesc, packageNames[filename], importPaths[filename])
+ if err != nil {
+ return nil, err
+ }
+ gen.Files = append(gen.Files, f)
+ gen.FilesByPath[filename] = f
+ if err = typeRegistry.registerAllExtensionsFromFile(f.Desc); err != nil {
+ return nil, err
+ }
+ }
+ for _, filename := range gen.Request.FileToGenerate {
+ f, ok := gen.FilesByPath[filename]
+ if !ok {
+ return nil, fmt.Errorf("no descriptor for generated file: %v", filename)
+ }
+ f.Generate = true
+ }
+
+ // Create fully-linked descriptors if new extensions were found
+ if typeRegistry.hasNovelExtensions() {
+ for _, f := range gen.Files {
+ b, err := proto.Marshal(f.Proto.ProtoReflect().Interface())
+ if err != nil {
+ return nil, err
+ }
+ err = proto.UnmarshalOptions{Resolver: typeRegistry}.Unmarshal(b, f.Proto)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return gen, nil
+}
+
+// Error records an error in code generation. The generator will report the
+// error back to protoc and will not produce output.
+func (gen *Plugin) Error(err error) {
+ if gen.err == nil {
+ gen.err = err
+ }
+}
+
+// Response returns the generator output.
+func (gen *Plugin) Response() *pluginpb.CodeGeneratorResponse {
+ resp := &pluginpb.CodeGeneratorResponse{}
+ if gen.err != nil {
+ resp.Error = proto.String(gen.err.Error())
+ return resp
+ }
+ for _, g := range gen.genFiles {
+ if g.skip {
+ continue
+ }
+ content, err := g.Content()
+ if err != nil {
+ return &pluginpb.CodeGeneratorResponse{
+ Error: proto.String(err.Error()),
+ }
+ }
+ filename := g.filename
+ if gen.module != "" {
+ trim := gen.module + "/"
+ if !strings.HasPrefix(filename, trim) {
+ return &pluginpb.CodeGeneratorResponse{
+ Error: proto.String(fmt.Sprintf("%v: generated file does not match prefix %q", filename, gen.module)),
+ }
+ }
+ filename = strings.TrimPrefix(filename, trim)
+ }
+ resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
+ Name: proto.String(filename),
+ Content: proto.String(string(content)),
+ })
+ if gen.annotateCode && strings.HasSuffix(g.filename, ".go") {
+ meta, err := g.metaFile(content)
+ if err != nil {
+ return &pluginpb.CodeGeneratorResponse{
+ Error: proto.String(err.Error()),
+ }
+ }
+ resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
+ Name: proto.String(filename + ".meta"),
+ Content: proto.String(meta),
+ })
+ }
+ }
+ if gen.SupportedFeatures > 0 {
+ resp.SupportedFeatures = proto.Uint64(gen.SupportedFeatures)
+ }
+ return resp
+}
+
+// A File describes a .proto source file.
+type File struct {
+ Desc protoreflect.FileDescriptor
+ Proto *descriptorpb.FileDescriptorProto
+
+ GoDescriptorIdent GoIdent // name of Go variable for the file descriptor
+ GoPackageName GoPackageName // name of this file's Go package
+ GoImportPath GoImportPath // import path of this file's Go package
+
+ Enums []*Enum // top-level enum declarations
+ Messages []*Message // top-level message declarations
+ Extensions []*Extension // top-level extension declarations
+ Services []*Service // top-level service declarations
+
+ Generate bool // true if we should generate code for this file
+
+ // GeneratedFilenamePrefix is used to construct filenames for generated
+ // files associated with this source file.
+ //
+ // For example, the source file "dir/foo.proto" might have a filename prefix
+ // of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go".
+ GeneratedFilenamePrefix string
+
+ location Location
+}
+
+func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) {
+ desc, err := protodesc.NewFile(p, gen.fileReg)
+ if err != nil {
+ return nil, fmt.Errorf("invalid FileDescriptorProto %q: %v", p.GetName(), err)
+ }
+ if err := gen.fileReg.RegisterFile(desc); err != nil {
+ return nil, fmt.Errorf("cannot register descriptor %q: %v", p.GetName(), err)
+ }
+ f := &File{
+ Desc: desc,
+ Proto: p,
+ GoPackageName: packageName,
+ GoImportPath: importPath,
+ location: Location{SourceFile: desc.Path()},
+ }
+
+ // Determine the prefix for generated Go files.
+ prefix := p.GetName()
+ if ext := path.Ext(prefix); ext == ".proto" || ext == ".protodevel" {
+ prefix = prefix[:len(prefix)-len(ext)]
+ }
+ switch gen.pathType {
+ case pathTypeImport:
+ // If paths=import, the output filename is derived from the Go import path.
+ prefix = path.Join(string(f.GoImportPath), path.Base(prefix))
+ case pathTypeSourceRelative:
+ // If paths=source_relative, the output filename is derived from
+ // the input filename.
+ }
+ f.GoDescriptorIdent = GoIdent{
+ GoName: "File_" + strs.GoSanitized(p.GetName()),
+ GoImportPath: f.GoImportPath,
+ }
+ f.GeneratedFilenamePrefix = prefix
+
+ for i, eds := 0, desc.Enums(); i < eds.Len(); i++ {
+ f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i)))
+ }
+ for i, mds := 0, desc.Messages(); i < mds.Len(); i++ {
+ f.Messages = append(f.Messages, newMessage(gen, f, nil, mds.Get(i)))
+ }
+ for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ {
+ f.Extensions = append(f.Extensions, newField(gen, f, nil, xds.Get(i)))
+ }
+ for i, sds := 0, desc.Services(); i < sds.Len(); i++ {
+ f.Services = append(f.Services, newService(gen, f, sds.Get(i)))
+ }
+ for _, message := range f.Messages {
+ if err := message.resolveDependencies(gen); err != nil {
+ return nil, err
+ }
+ }
+ for _, extension := range f.Extensions {
+ if err := extension.resolveDependencies(gen); err != nil {
+ return nil, err
+ }
+ }
+ for _, service := range f.Services {
+ for _, method := range service.Methods {
+ if err := method.resolveDependencies(gen); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return f, nil
+}
+
+// splitImportPathAndPackageName splits off the optional Go package name
+// from the Go import path when separated by a ';' delimiter.
+func splitImportPathAndPackageName(s string) (GoImportPath, GoPackageName) {
+ if i := strings.Index(s, ";"); i >= 0 {
+ return GoImportPath(s[:i]), GoPackageName(s[i+1:])
+ }
+ return GoImportPath(s), ""
+}
+
+// An Enum describes an enum.
+type Enum struct {
+ Desc protoreflect.EnumDescriptor
+
+ GoIdent GoIdent // name of the generated Go type
+
+ Values []*EnumValue // enum value declarations
+
+ Location Location // location of this enum
+ Comments CommentSet // comments associated with this enum
+}
+
+func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum {
+ var loc Location
+ if parent != nil {
+ loc = parent.Location.appendPath(genid.DescriptorProto_EnumType_field_number, desc.Index())
+ } else {
+ loc = f.location.appendPath(genid.FileDescriptorProto_EnumType_field_number, desc.Index())
+ }
+ enum := &Enum{
+ Desc: desc,
+ GoIdent: newGoIdent(f, desc),
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+ gen.enumsByName[desc.FullName()] = enum
+ for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ {
+ enum.Values = append(enum.Values, newEnumValue(gen, f, parent, enum, vds.Get(i)))
+ }
+ return enum
+}
+
+// An EnumValue describes an enum value.
+type EnumValue struct {
+ Desc protoreflect.EnumValueDescriptor
+
+ GoIdent GoIdent // name of the generated Go declaration
+
+ Parent *Enum // enum in which this value is declared
+
+ Location Location // location of this enum value
+ Comments CommentSet // comments associated with this enum value
+}
+
+func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc protoreflect.EnumValueDescriptor) *EnumValue {
+ // A top-level enum value's name is: EnumName_ValueName
+ // An enum value contained in a message is: MessageName_ValueName
+ //
+ // For historical reasons, enum value names are not camel-cased.
+ parentIdent := enum.GoIdent
+ if message != nil {
+ parentIdent = message.GoIdent
+ }
+ name := parentIdent.GoName + "_" + string(desc.Name())
+ loc := enum.Location.appendPath(genid.EnumDescriptorProto_Value_field_number, desc.Index())
+ return &EnumValue{
+ Desc: desc,
+ GoIdent: f.GoImportPath.Ident(name),
+ Parent: enum,
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+}
+
+// A Message describes a message.
+type Message struct {
+ Desc protoreflect.MessageDescriptor
+
+ GoIdent GoIdent // name of the generated Go type
+
+ Fields []*Field // message field declarations
+ Oneofs []*Oneof // message oneof declarations
+
+ Enums []*Enum // nested enum declarations
+ Messages []*Message // nested message declarations
+ Extensions []*Extension // nested extension declarations
+
+ Location Location // location of this message
+ Comments CommentSet // comments associated with this message
+}
+
+func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message {
+ var loc Location
+ if parent != nil {
+ loc = parent.Location.appendPath(genid.DescriptorProto_NestedType_field_number, desc.Index())
+ } else {
+ loc = f.location.appendPath(genid.FileDescriptorProto_MessageType_field_number, desc.Index())
+ }
+ message := &Message{
+ Desc: desc,
+ GoIdent: newGoIdent(f, desc),
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+ gen.messagesByName[desc.FullName()] = message
+ for i, eds := 0, desc.Enums(); i < eds.Len(); i++ {
+ message.Enums = append(message.Enums, newEnum(gen, f, message, eds.Get(i)))
+ }
+ for i, mds := 0, desc.Messages(); i < mds.Len(); i++ {
+ message.Messages = append(message.Messages, newMessage(gen, f, message, mds.Get(i)))
+ }
+ for i, fds := 0, desc.Fields(); i < fds.Len(); i++ {
+ message.Fields = append(message.Fields, newField(gen, f, message, fds.Get(i)))
+ }
+ for i, ods := 0, desc.Oneofs(); i < ods.Len(); i++ {
+ message.Oneofs = append(message.Oneofs, newOneof(gen, f, message, ods.Get(i)))
+ }
+ for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ {
+ message.Extensions = append(message.Extensions, newField(gen, f, message, xds.Get(i)))
+ }
+
+ // Resolve local references between fields and oneofs.
+ for _, field := range message.Fields {
+ if od := field.Desc.ContainingOneof(); od != nil {
+ oneof := message.Oneofs[od.Index()]
+ field.Oneof = oneof
+ oneof.Fields = append(oneof.Fields, field)
+ }
+ }
+
+ // Field name conflict resolution.
+ //
+ // We assume well-known method names that may be attached to a generated
+ // message type, as well as a 'Get*' method for each field. For each
+ // field in turn, we add _s to its name until there are no conflicts.
+ //
+ // Any change to the following set of method names is a potential
+ // incompatible API change because it may change generated field names.
+ //
+ // TODO: If we ever support a 'go_name' option to set the Go name of a
+ // field, we should consider dropping this entirely. The conflict
+ // resolution algorithm is subtle and surprising (changing the order
+ // in which fields appear in the .proto source file can change the
+ // names of fields in generated code), and does not adapt well to
+ // adding new per-field methods such as setters.
+ usedNames := map[string]bool{
+ "Reset": true,
+ "String": true,
+ "ProtoMessage": true,
+ "Marshal": true,
+ "Unmarshal": true,
+ "ExtensionRangeArray": true,
+ "ExtensionMap": true,
+ "Descriptor": true,
+ }
+ makeNameUnique := func(name string, hasGetter bool) string {
+ for usedNames[name] || (hasGetter && usedNames["Get"+name]) {
+ name += "_"
+ }
+ usedNames[name] = true
+ usedNames["Get"+name] = hasGetter
+ return name
+ }
+ for _, field := range message.Fields {
+ field.GoName = makeNameUnique(field.GoName, true)
+ field.GoIdent.GoName = message.GoIdent.GoName + "_" + field.GoName
+ if field.Oneof != nil && field.Oneof.Fields[0] == field {
+ // Make the name for a oneof unique as well. For historical reasons,
+ // this assumes that a getter method is not generated for oneofs.
+ // This is incorrect, but fixing it breaks existing code.
+ field.Oneof.GoName = makeNameUnique(field.Oneof.GoName, false)
+ field.Oneof.GoIdent.GoName = message.GoIdent.GoName + "_" + field.Oneof.GoName
+ }
+ }
+
+ // Oneof field name conflict resolution.
+ //
+ // This conflict resolution is incomplete as it does not consider collisions
+ // with other oneof field types, but fixing it breaks existing code.
+ for _, field := range message.Fields {
+ if field.Oneof != nil {
+ Loop:
+ for {
+ for _, nestedMessage := range message.Messages {
+ if nestedMessage.GoIdent == field.GoIdent {
+ field.GoIdent.GoName += "_"
+ continue Loop
+ }
+ }
+ for _, nestedEnum := range message.Enums {
+ if nestedEnum.GoIdent == field.GoIdent {
+ field.GoIdent.GoName += "_"
+ continue Loop
+ }
+ }
+ break Loop
+ }
+ }
+ }
+
+ return message
+}
+
+func (message *Message) resolveDependencies(gen *Plugin) error {
+ for _, field := range message.Fields {
+ if err := field.resolveDependencies(gen); err != nil {
+ return err
+ }
+ }
+ for _, message := range message.Messages {
+ if err := message.resolveDependencies(gen); err != nil {
+ return err
+ }
+ }
+ for _, extension := range message.Extensions {
+ if err := extension.resolveDependencies(gen); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// A Field describes a message field.
+type Field struct {
+ Desc protoreflect.FieldDescriptor
+
+ // GoName is the base name of this field's Go field and methods.
+ // For code generated by protoc-gen-go, this means a field named
+ // '{{GoName}}' and a getter method named 'Get{{GoName}}'.
+ GoName string // e.g., "FieldName"
+
+ // GoIdent is the base name of a top-level declaration for this field.
+ // For code generated by protoc-gen-go, this means a wrapper type named
+ // '{{GoIdent}}' for members fields of a oneof, and a variable named
+ // 'E_{{GoIdent}}' for extension fields.
+ GoIdent GoIdent // e.g., "MessageName_FieldName"
+
+ Parent *Message // message in which this field is declared; nil if top-level extension
+ Oneof *Oneof // containing oneof; nil if not part of a oneof
+ Extendee *Message // extended message for extension fields; nil otherwise
+
+ Enum *Enum // type for enum fields; nil otherwise
+ Message *Message // type for message or group fields; nil otherwise
+
+ Location Location // location of this field
+ Comments CommentSet // comments associated with this field
+}
+
+func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDescriptor) *Field {
+ var loc Location
+ switch {
+ case desc.IsExtension() && message == nil:
+ loc = f.location.appendPath(genid.FileDescriptorProto_Extension_field_number, desc.Index())
+ case desc.IsExtension() && message != nil:
+ loc = message.Location.appendPath(genid.DescriptorProto_Extension_field_number, desc.Index())
+ default:
+ loc = message.Location.appendPath(genid.DescriptorProto_Field_field_number, desc.Index())
+ }
+ camelCased := strs.GoCamelCase(string(desc.Name()))
+ var parentPrefix string
+ if message != nil {
+ parentPrefix = message.GoIdent.GoName + "_"
+ }
+ field := &Field{
+ Desc: desc,
+ GoName: camelCased,
+ GoIdent: GoIdent{
+ GoImportPath: f.GoImportPath,
+ GoName: parentPrefix + camelCased,
+ },
+ Parent: message,
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+ return field
+}
+
+func (field *Field) resolveDependencies(gen *Plugin) error {
+ desc := field.Desc
+ switch desc.Kind() {
+ case protoreflect.EnumKind:
+ name := field.Desc.Enum().FullName()
+ enum, ok := gen.enumsByName[name]
+ if !ok {
+ return fmt.Errorf("field %v: no descriptor for enum %v", desc.FullName(), name)
+ }
+ field.Enum = enum
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ name := desc.Message().FullName()
+ message, ok := gen.messagesByName[name]
+ if !ok {
+ return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name)
+ }
+ field.Message = message
+ }
+ if desc.IsExtension() {
+ name := desc.ContainingMessage().FullName()
+ message, ok := gen.messagesByName[name]
+ if !ok {
+ return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name)
+ }
+ field.Extendee = message
+ }
+ return nil
+}
+
+// A Oneof describes a message oneof.
+type Oneof struct {
+ Desc protoreflect.OneofDescriptor
+
+ // GoName is the base name of this oneof's Go field and methods.
+ // For code generated by protoc-gen-go, this means a field named
+ // '{{GoName}}' and a getter method named 'Get{{GoName}}'.
+ GoName string // e.g., "OneofName"
+
+ // GoIdent is the base name of a top-level declaration for this oneof.
+ GoIdent GoIdent // e.g., "MessageName_OneofName"
+
+ Parent *Message // message in which this oneof is declared
+
+ Fields []*Field // fields that are part of this oneof
+
+ Location Location // location of this oneof
+ Comments CommentSet // comments associated with this oneof
+}
+
+func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof {
+ loc := message.Location.appendPath(genid.DescriptorProto_OneofDecl_field_number, desc.Index())
+ camelCased := strs.GoCamelCase(string(desc.Name()))
+ parentPrefix := message.GoIdent.GoName + "_"
+ return &Oneof{
+ Desc: desc,
+ Parent: message,
+ GoName: camelCased,
+ GoIdent: GoIdent{
+ GoImportPath: f.GoImportPath,
+ GoName: parentPrefix + camelCased,
+ },
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+}
+
+// Extension is an alias of Field for documentation.
+type Extension = Field
+
+// A Service describes a service.
+type Service struct {
+ Desc protoreflect.ServiceDescriptor
+
+ GoName string
+
+ Methods []*Method // service method declarations
+
+ Location Location // location of this service
+ Comments CommentSet // comments associated with this service
+}
+
+func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service {
+ loc := f.location.appendPath(genid.FileDescriptorProto_Service_field_number, desc.Index())
+ service := &Service{
+ Desc: desc,
+ GoName: strs.GoCamelCase(string(desc.Name())),
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+ for i, mds := 0, desc.Methods(); i < mds.Len(); i++ {
+ service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i)))
+ }
+ return service
+}
+
+// A Method describes a method in a service.
+type Method struct {
+ Desc protoreflect.MethodDescriptor
+
+ GoName string
+
+ Parent *Service // service in which this method is declared
+
+ Input *Message
+ Output *Message
+
+ Location Location // location of this method
+ Comments CommentSet // comments associated with this method
+}
+
+func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method {
+ loc := service.Location.appendPath(genid.ServiceDescriptorProto_Method_field_number, desc.Index())
+ method := &Method{
+ Desc: desc,
+ GoName: strs.GoCamelCase(string(desc.Name())),
+ Parent: service,
+ Location: loc,
+ Comments: makeCommentSet(f.Desc.SourceLocations().ByDescriptor(desc)),
+ }
+ return method
+}
+
+func (method *Method) resolveDependencies(gen *Plugin) error {
+ desc := method.Desc
+
+ inName := desc.Input().FullName()
+ in, ok := gen.messagesByName[inName]
+ if !ok {
+ return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), inName)
+ }
+ method.Input = in
+
+ outName := desc.Output().FullName()
+ out, ok := gen.messagesByName[outName]
+ if !ok {
+ return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), outName)
+ }
+ method.Output = out
+
+ return nil
+}
+
+// A GeneratedFile is a generated file.
+type GeneratedFile struct {
+ gen *Plugin
+ skip bool
+ filename string
+ goImportPath GoImportPath
+ buf bytes.Buffer
+ packageNames map[GoImportPath]GoPackageName
+ usedPackageNames map[GoPackageName]bool
+ manualImports map[GoImportPath]bool
+ annotations map[string][]Location
+}
+
+// NewGeneratedFile creates a new generated file with the given filename
+// and import path.
+func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) *GeneratedFile {
+ g := &GeneratedFile{
+ gen: gen,
+ filename: filename,
+ goImportPath: goImportPath,
+ packageNames: make(map[GoImportPath]GoPackageName),
+ usedPackageNames: make(map[GoPackageName]bool),
+ manualImports: make(map[GoImportPath]bool),
+ annotations: make(map[string][]Location),
+ }
+
+ // All predeclared identifiers in Go are already used.
+ for _, s := range types.Universe.Names() {
+ g.usedPackageNames[GoPackageName(s)] = true
+ }
+
+ gen.genFiles = append(gen.genFiles, g)
+ return g
+}
+
+// P prints a line to the generated output. It converts each parameter to a
+// string following the same rules as fmt.Print. It never inserts spaces
+// between parameters.
+func (g *GeneratedFile) P(v ...interface{}) {
+ for _, x := range v {
+ switch x := x.(type) {
+ case GoIdent:
+ fmt.Fprint(&g.buf, g.QualifiedGoIdent(x))
+ default:
+ fmt.Fprint(&g.buf, x)
+ }
+ }
+ fmt.Fprintln(&g.buf)
+}
+
+// QualifiedGoIdent returns the string to use for a Go identifier.
+//
+// If the identifier is from a different Go package than the generated file,
+// the returned name will be qualified (package.name) and an import statement
+// for the identifier's package will be included in the file.
+func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string {
+ if ident.GoImportPath == g.goImportPath {
+ return ident.GoName
+ }
+ if packageName, ok := g.packageNames[ident.GoImportPath]; ok {
+ return string(packageName) + "." + ident.GoName
+ }
+ packageName := cleanPackageName(path.Base(string(ident.GoImportPath)))
+ for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ {
+ packageName = orig + GoPackageName(strconv.Itoa(i))
+ }
+ g.packageNames[ident.GoImportPath] = packageName
+ g.usedPackageNames[packageName] = true
+ return string(packageName) + "." + ident.GoName
+}
+
+// Import ensures a package is imported by the generated file.
+//
+// Packages referenced by QualifiedGoIdent are automatically imported.
+// Explicitly importing a package with Import is generally only necessary
+// when the import will be blank (import _ "package").
+func (g *GeneratedFile) Import(importPath GoImportPath) {
+ g.manualImports[importPath] = true
+}
+
+// Write implements io.Writer.
+func (g *GeneratedFile) Write(p []byte) (n int, err error) {
+ return g.buf.Write(p)
+}
+
+// Skip removes the generated file from the plugin output.
+func (g *GeneratedFile) Skip() {
+ g.skip = true
+}
+
+// Unskip reverts a previous call to Skip, re-including the generated file in
+// the plugin output.
+func (g *GeneratedFile) Unskip() {
+ g.skip = false
+}
+
+// Annotate associates a symbol in a generated Go file with a location in a
+// source .proto file.
+//
+// The symbol may refer to a type, constant, variable, function, method, or
+// struct field. The "T.sel" syntax is used to identify the method or field
+// 'sel' on type 'T'.
+func (g *GeneratedFile) Annotate(symbol string, loc Location) {
+ g.annotations[symbol] = append(g.annotations[symbol], loc)
+}
+
+// Content returns the contents of the generated file.
+func (g *GeneratedFile) Content() ([]byte, error) {
+ if !strings.HasSuffix(g.filename, ".go") {
+ return g.buf.Bytes(), nil
+ }
+
+ // Reformat generated code.
+ original := g.buf.Bytes()
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", original, parser.ParseComments)
+ if err != nil {
+ // Print out the bad code with line numbers.
+ // This should never happen in practice, but it can while changing generated code
+ // so consider this a debugging aid.
+ var src bytes.Buffer
+ s := bufio.NewScanner(bytes.NewReader(original))
+ for line := 1; s.Scan(); line++ {
+ fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+ }
+ return nil, fmt.Errorf("%v: unparsable Go source: %v\n%v", g.filename, err, src.String())
+ }
+
+ // Collect a sorted list of all imports.
+ var importPaths [][2]string
+ rewriteImport := func(importPath string) string {
+ if f := g.gen.opts.ImportRewriteFunc; f != nil {
+ return string(f(GoImportPath(importPath)))
+ }
+ return importPath
+ }
+ for importPath := range g.packageNames {
+ pkgName := string(g.packageNames[GoImportPath(importPath)])
+ pkgPath := rewriteImport(string(importPath))
+ importPaths = append(importPaths, [2]string{pkgName, pkgPath})
+ }
+ for importPath := range g.manualImports {
+ if _, ok := g.packageNames[importPath]; !ok {
+ pkgPath := rewriteImport(string(importPath))
+ importPaths = append(importPaths, [2]string{"_", pkgPath})
+ }
+ }
+ sort.Slice(importPaths, func(i, j int) bool {
+ return importPaths[i][1] < importPaths[j][1]
+ })
+
+ // Modify the AST to include a new import block.
+ if len(importPaths) > 0 {
+ // Insert block after package statement or
+ // possible comment attached to the end of the package statement.
+ pos := file.Package
+ tokFile := fset.File(file.Package)
+ pkgLine := tokFile.Line(file.Package)
+ for _, c := range file.Comments {
+ if tokFile.Line(c.Pos()) > pkgLine {
+ break
+ }
+ pos = c.End()
+ }
+
+ // Construct the import block.
+ impDecl := &ast.GenDecl{
+ Tok: token.IMPORT,
+ TokPos: pos,
+ Lparen: pos,
+ Rparen: pos,
+ }
+ for _, importPath := range importPaths {
+ impDecl.Specs = append(impDecl.Specs, &ast.ImportSpec{
+ Name: &ast.Ident{
+ Name: importPath[0],
+ NamePos: pos,
+ },
+ Path: &ast.BasicLit{
+ Kind: token.STRING,
+ Value: strconv.Quote(importPath[1]),
+ ValuePos: pos,
+ },
+ EndPos: pos,
+ })
+ }
+ file.Decls = append([]ast.Decl{impDecl}, file.Decls...)
+ }
+
+ var out bytes.Buffer
+ if err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(&out, fset, file); err != nil {
+ return nil, fmt.Errorf("%v: can not reformat Go source: %v", g.filename, err)
+ }
+ return out.Bytes(), nil
+}
+
+// metaFile returns the contents of the file's metadata file, which is a
+// text formatted string of the google.protobuf.GeneratedCodeInfo.
+func (g *GeneratedFile) metaFile(content []byte) (string, error) {
+ fset := token.NewFileSet()
+ astFile, err := parser.ParseFile(fset, "", content, 0)
+ if err != nil {
+ return "", err
+ }
+ info := &descriptorpb.GeneratedCodeInfo{}
+
+ seenAnnotations := make(map[string]bool)
+ annotate := func(s string, ident *ast.Ident) {
+ seenAnnotations[s] = true
+ for _, loc := range g.annotations[s] {
+ info.Annotation = append(info.Annotation, &descriptorpb.GeneratedCodeInfo_Annotation{
+ SourceFile: proto.String(loc.SourceFile),
+ Path: loc.Path,
+ Begin: proto.Int32(int32(fset.Position(ident.Pos()).Offset)),
+ End: proto.Int32(int32(fset.Position(ident.End()).Offset)),
+ })
+ }
+ }
+ for _, decl := range astFile.Decls {
+ switch decl := decl.(type) {
+ case *ast.GenDecl:
+ for _, spec := range decl.Specs {
+ switch spec := spec.(type) {
+ case *ast.TypeSpec:
+ annotate(spec.Name.Name, spec.Name)
+ switch st := spec.Type.(type) {
+ case *ast.StructType:
+ for _, field := range st.Fields.List {
+ for _, name := range field.Names {
+ annotate(spec.Name.Name+"."+name.Name, name)
+ }
+ }
+ case *ast.InterfaceType:
+ for _, field := range st.Methods.List {
+ for _, name := range field.Names {
+ annotate(spec.Name.Name+"."+name.Name, name)
+ }
+ }
+ }
+ case *ast.ValueSpec:
+ for _, name := range spec.Names {
+ annotate(name.Name, name)
+ }
+ }
+ }
+ case *ast.FuncDecl:
+ if decl.Recv == nil {
+ annotate(decl.Name.Name, decl.Name)
+ } else {
+ recv := decl.Recv.List[0].Type
+ if s, ok := recv.(*ast.StarExpr); ok {
+ recv = s.X
+ }
+ if id, ok := recv.(*ast.Ident); ok {
+ annotate(id.Name+"."+decl.Name.Name, decl.Name)
+ }
+ }
+ }
+ }
+ for a := range g.annotations {
+ if !seenAnnotations[a] {
+ return "", fmt.Errorf("%v: no symbol matching annotation %q", g.filename, a)
+ }
+ }
+
+ b, err := prototext.Marshal(info)
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+// A GoIdent is a Go identifier, consisting of a name and import path.
+// The name is a single identifier and may not be a dot-qualified selector.
+type GoIdent struct {
+ GoName string
+ GoImportPath GoImportPath
+}
+
+func (id GoIdent) String() string { return fmt.Sprintf("%q.%v", id.GoImportPath, id.GoName) }
+
+// newGoIdent returns the Go identifier for a descriptor.
+func newGoIdent(f *File, d protoreflect.Descriptor) GoIdent {
+ name := strings.TrimPrefix(string(d.FullName()), string(f.Desc.Package())+".")
+ return GoIdent{
+ GoName: strs.GoCamelCase(name),
+ GoImportPath: f.GoImportPath,
+ }
+}
+
+// A GoImportPath is the import path of a Go package.
+// For example: "google.golang.org/protobuf/compiler/protogen"
+type GoImportPath string
+
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
+
+// Ident returns a GoIdent with s as the GoName and p as the GoImportPath.
+func (p GoImportPath) Ident(s string) GoIdent {
+ return GoIdent{GoName: s, GoImportPath: p}
+}
+
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
+type GoPackageName string
+
+// cleanPackageName converts a string to a valid Go package name.
+func cleanPackageName(name string) GoPackageName {
+ return GoPackageName(strs.GoSanitized(name))
+}
+
+type pathType int
+
+const (
+ pathTypeImport pathType = iota
+ pathTypeSourceRelative
+)
+
+// A Location is a location in a .proto source file.
+//
+// See the google.protobuf.SourceCodeInfo documentation in descriptor.proto
+// for details.
+type Location struct {
+ SourceFile string
+ Path protoreflect.SourcePath
+}
+
+// appendPath adds elements to a Location's path, returning a new Location.
+func (loc Location) appendPath(num protoreflect.FieldNumber, idx int) Location {
+ loc.Path = append(protoreflect.SourcePath(nil), loc.Path...) // make copy
+ loc.Path = append(loc.Path, int32(num), int32(idx))
+ return loc
+}
+
+// CommentSet is a set of leading and trailing comments associated
+// with a .proto descriptor declaration.
+type CommentSet struct {
+ LeadingDetached []Comments
+ Leading Comments
+ Trailing Comments
+}
+
+func makeCommentSet(loc protoreflect.SourceLocation) CommentSet {
+ var leadingDetached []Comments
+ for _, s := range loc.LeadingDetachedComments {
+ leadingDetached = append(leadingDetached, Comments(s))
+ }
+ return CommentSet{
+ LeadingDetached: leadingDetached,
+ Leading: Comments(loc.LeadingComments),
+ Trailing: Comments(loc.TrailingComments),
+ }
+}
+
+// Comments is a comments string as provided by protoc.
+type Comments string
+
+// String formats the comments by inserting // to the start of each line,
+// ensuring that there is a trailing newline.
+// An empty comment is formatted as an empty string.
+func (c Comments) String() string {
+ if c == "" {
+ return ""
+ }
+ var b []byte
+ for _, line := range strings.Split(strings.TrimSuffix(string(c), "\n"), "\n") {
+ b = append(b, "//"...)
+ b = append(b, line...)
+ b = append(b, "\n"...)
+ }
+ return string(b)
+}
+
+// extensionRegistry allows registration of new extensions defined in the .proto
+// file for which we are generating bindings.
+//
+// Lookups consult the local type registry first and fall back to the base type
+// registry, which defaults to protoregistry.GlobalTypes.
+type extensionRegistry struct {
+ base *protoregistry.Types
+ local *protoregistry.Types
+}
+
+func newExtensionRegistry() *extensionRegistry {
+ return &extensionRegistry{
+ base: protoregistry.GlobalTypes,
+ local: &protoregistry.Types{},
+ }
+}
+
+// FindExtensionByName implements proto.UnmarshalOptions.FindExtensionByName
+func (e *extensionRegistry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xt, err := e.local.FindExtensionByName(field); err == nil {
+ return xt, nil
+ }
+
+ return e.base.FindExtensionByName(field)
+}
+
+// FindExtensionByNumber implements proto.UnmarshalOptions.FindExtensionByNumber
+func (e *extensionRegistry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xt, err := e.local.FindExtensionByNumber(message, field); err == nil {
+ return xt, nil
+ }
+
+ return e.base.FindExtensionByNumber(message, field)
+}
+
+func (e *extensionRegistry) hasNovelExtensions() bool {
+ return e.local.NumExtensions() > 0
+}
+
+func (e *extensionRegistry) registerAllExtensionsFromFile(f protoreflect.FileDescriptor) error {
+ if err := e.registerAllExtensions(f.Extensions()); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (e *extensionRegistry) registerAllExtensionsFromMessage(ms protoreflect.MessageDescriptors) error {
+ for i := 0; i < ms.Len(); i++ {
+ m := ms.Get(i)
+ if err := e.registerAllExtensions(m.Extensions()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e *extensionRegistry) registerAllExtensions(exts protoreflect.ExtensionDescriptors) error {
+ for i := 0; i < exts.Len(); i++ {
+ if err := e.registerExtension(exts.Get(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// registerExtension adds the given extension to the type registry if an
+// extension with that full name does not exist yet.
+func (e *extensionRegistry) registerExtension(xd protoreflect.ExtensionDescriptor) error {
+ if _, err := e.FindExtensionByName(xd.FullName()); err != protoregistry.NotFound {
+ // Either the extension already exists or there was an error; either way, we're done.
+ return err
+ }
+ return e.local.RegisterExtension(dynamicpb.NewExtensionType(xd))
+}
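
To ground the protogen API introduced above, here is a minimal, hypothetical plugin built on it (the plugin name protoc-gen-names and the _names.pb.go filename suffix are illustrative, not part of this diff). It exercises Options.Run, Plugin.NewGeneratedFile, GeneratedFile.P, and the per-file metadata populated by Options.New:

    package main

    import (
    	"strconv"

    	"google.golang.org/protobuf/compiler/protogen"
    )

    func main() {
    	// Run reads the CodeGeneratorRequest from stdin and writes the
    	// CodeGeneratorResponse to stdout, as documented above.
    	protogen.Options{}.Run(func(gen *protogen.Plugin) error {
    		for _, f := range gen.Files {
    			if !f.Generate {
    				continue
    			}
    			g := gen.NewGeneratedFile(f.GeneratedFilenamePrefix+"_names.pb.go", f.GoImportPath)
    			g.P("// Code generated by protoc-gen-names. DO NOT EDIT.")
    			g.P("package ", f.GoPackageName)
    			g.P()
    			for _, m := range f.Messages {
    				// One constant per top-level message, holding its full proto name.
    				g.P("const ", m.GoIdent.GoName, "_FullName = ", strconv.Quote(string(m.Desc.FullName())))
    			}
    		}
    		return nil
    	})
    }
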
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
index 00ea2fec..21d5d2cb 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -4,7 +4,7 @@
// Package protojson marshals and unmarshals protocol buffer messages as JSON
// format. It follows the guide at
-// https://developers.google.com/protocol-buffers/docs/proto3#json.
+// https://protobuf.dev/programming-guides/proto3#json.
//
// This package produces a different output than the standard "encoding/json"
// package, which does not operate correctly on protocol buffer messages.
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index c85f8469..6c37d417 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
return d.unexpectedTokenError(tok)
}
- t, err := time.Parse(time.RFC3339Nano, tok.ParsedString())
+ s := tok.ParsedString()
+ t, err := time.Parse(time.RFC3339Nano, s)
if err != nil {
return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
}
- // Validate seconds. No need to validate nanos because time.Parse would have
- // covered that already.
+ // Validate seconds.
secs := t.Unix()
if secs < minTimestampSeconds || secs > maxTimestampSeconds {
return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString())
}
+ // Validate subseconds.
+ i := strings.LastIndexByte(s, '.') // start of subsecond field
+ j := strings.LastIndexAny(s, "Z-+") // start of timezone field
+ if i >= 0 && j >= i && j-i > len(".999999999") {
+ return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString())
+ }
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
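
The added check rejects timestamps whose fractional-second field exceeds nine digits, which the preceding time.Parse call does not itself reject. A standalone sketch of the same index arithmetic (hypothetical snippet, not the vendored code):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	s := "2021-01-01T00:00:00.1234567890Z" // ten fractional digits
    	i := strings.LastIndexByte(s, '.')     // start of the subsecond field
    	j := strings.LastIndexAny(s, "Z-+")    // start of the timezone field
    	tooLong := i >= 0 && j >= i && j-i > len(".999999999")
    	fmt.Println(tooLong) // true: more than nine digits, so the value is rejected
    }
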
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index ce57f57e..f4b4686c 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package protowire parses and formats the raw wire encoding.
-// See https://developers.google.com/protocol-buffers/docs/encoding.
+// See https://protobuf.dev/programming-guides/encoding.
//
// For marshaling and unmarshaling entire protobuf messages,
// use the "google.golang.org/protobuf/proto" package instead.
@@ -29,12 +29,8 @@ const (
)
// IsValid reports whether the field number is semantically valid.
-//
-// Note that while numbers within the reserved range are semantically invalid,
-// they are syntactically valid in the wire format.
-// Implementations may treat records with reserved field numbers as unknown.
func (n Number) IsValid() bool {
- return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber
+ return MinValidNumber <= n && n <= MaxValidNumber
}
// Type represents the wire type.
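
With the relaxed check, field numbers in the reserved range (19000-19999) now report as valid at the wire level, leaving any rejection of reserved numbers to higher layers. A small sketch of the observable change:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// 19500 lies inside the reserved range; it was previously reported as
	// invalid by IsValid, but is now considered syntactically valid.
	fmt.Println(protowire.Number(19500).IsValid()) // true
	fmt.Println(protowire.Number(0).IsValid())     // false: below MinValidNumber
}
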
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
index b13fd29e..d043a6eb 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool {
}
// consumeToken constructs a Token for given Kind with raw value derived from
-// current d.in and given size, and consumes the given size-lenght of it.
+// current d.in and given size, and consumes the given size-length of it.
func (d *Decoder) consumeToken(kind Kind, size int) Token {
tok := Token{
kind: kind,
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 427c62d0..87853e78 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
// Field number. Identify if input is a valid number that is not negative
// and is decimal integer within 32-bit range.
if num := parseNumber(d.in); num.size > 0 {
+ str := num.string(d.in)
if !num.neg && num.kind == numDec {
- if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
+ if _, err := strconv.ParseInt(str, 10, 32); err == nil {
return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
}
}
- return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
+ return Token{}, d.newSyntaxError("invalid field number: %s", str)
}
return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
index 81a5d8c8..45c81f02 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) {
if num.neg {
numAttrs |= isNegative
}
- strSize := num.size
- last := num.size - 1
- if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
- strSize = last
- }
tok := Token{
kind: Scalar,
attrs: numberValue,
pos: len(d.orig) - len(d.in),
raw: d.in[:num.size],
- str: string(d.in[:strSize]),
+ str: num.string(d.in),
numAttrs: numAttrs,
}
d.consume(num.size)
@@ -46,6 +41,27 @@ type number struct {
kind uint8
neg bool
size int
+ // if neg, this is the length of whitespace and comments between
+ // the minus sign and the rest of the number literal
+ sep int
+}
+
+func (num number) string(data []byte) string {
+ strSize := num.size
+ last := num.size - 1
+ if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') {
+ strSize = last
+ }
+ if num.neg && num.sep > 0 {
+ // strip whitespace/comments between negative sign and the rest
+ strLen := strSize - num.sep
+ str := make([]byte, strLen)
+ str[0] = data[0]
+ copy(str[1:], data[num.sep+1:strSize])
+ return string(str)
+ }
+ return string(data[:strSize])
+
}
// parseNumber constructs a number object from given input. It allows for the
@@ -67,19 +83,22 @@ func parseNumber(input []byte) number {
}
// Optional -
+ var sep int
if s[0] == '-' {
neg = true
s = s[1:]
size++
+ // Consume any whitespace or comments between the
+ // negative sign and the rest of the number
+ lenBefore := len(s)
+ s = consume(s, 0)
+ sep = lenBefore - len(s)
+ size += sep
if len(s) == 0 {
return number{}
}
}
- // C++ allows for whitespace and comments in between the negative sign and
- // the rest of the number. This logic currently does not but is consistent
- // with v1.
-
switch {
case s[0] == '0':
if len(s) > 1 {
@@ -116,7 +135,7 @@ func parseNumber(input []byte) number {
if len(s) > 0 && !isDelim(s[0]) {
return number{}
}
- return number{kind: kind, neg: neg, size: size}
+ return number{kind: kind, neg: neg, size: size, sep: sep}
}
}
s = s[1:]
@@ -188,5 +207,5 @@ func parseNumber(input []byte) number {
return number{}
}
- return number{kind: kind, neg: neg, size: size}
+ return number{kind: kind, neg: neg, size: size, sep: sep}
}
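
Since the internal text decoder now consumes whitespace and comments between a minus sign and the digits (matching C++ textproto behavior) and strips them from the token string, input such as `seconds: - 42` should be accepted through the public prototext package. A hedged sketch, assuming this vendored version:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := &timestamppb.Timestamp{}
	// Whitespace between the negative sign and the digits is now tolerated.
	if err := prototext.Unmarshal([]byte("seconds: - 42"), ts); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(ts.GetSeconds()) // -42
}
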
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index e3cdf1c2..5c0e8f73 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -50,6 +50,7 @@ const (
FileDescriptorProto_Options_field_name protoreflect.Name = "options"
FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info"
FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax"
+ FileDescriptorProto_Edition_field_name protoreflect.Name = "edition"
FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name"
FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package"
@@ -63,6 +64,7 @@ const (
FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options"
FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info"
FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax"
+ FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition"
)
// Field numbers for google.protobuf.FileDescriptorProto.
@@ -79,6 +81,7 @@ const (
FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8
FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9
FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12
+ FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13
)
// Names for google.protobuf.DescriptorProto.
@@ -494,26 +497,29 @@ const (
// Field names for google.protobuf.MessageOptions.
const (
- MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
- MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
- MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
- MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
- MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+ MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format"
+ MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor"
+ MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry"
+ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
+ MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
- MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
- MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
- MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
- MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
- MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
+ MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
+ MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor"
+ MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
+ MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
+ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
+ MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.MessageOptions.
const (
- MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
- MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
- MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
- MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
- MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+ MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1
+ MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2
+ MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7
+ MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11
+ MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
// Names for google.protobuf.FieldOptions.
@@ -528,16 +534,24 @@ const (
FieldOptions_Packed_field_name protoreflect.Name = "packed"
FieldOptions_Jstype_field_name protoreflect.Name = "jstype"
FieldOptions_Lazy_field_name protoreflect.Name = "lazy"
+ FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy"
FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated"
FieldOptions_Weak_field_name protoreflect.Name = "weak"
+ FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
+ FieldOptions_Retention_field_name protoreflect.Name = "retention"
+ FieldOptions_Target_field_name protoreflect.Name = "target"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed"
FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype"
FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy"
+ FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy"
FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated"
FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak"
+ FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
+ FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention"
+ FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@@ -547,8 +561,12 @@ const (
FieldOptions_Packed_field_number protoreflect.FieldNumber = 2
FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6
FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5
+ FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15
FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3
FieldOptions_Weak_field_number protoreflect.FieldNumber = 10
+ FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16
+ FieldOptions_Retention_field_number protoreflect.FieldNumber = 17
+ FieldOptions_Target_field_number protoreflect.FieldNumber = 18
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -564,6 +582,18 @@ const (
FieldOptions_JSType_enum_name = "JSType"
)
+// Full and short names for google.protobuf.FieldOptions.OptionRetention.
+const (
+ FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention"
+ FieldOptions_OptionRetention_enum_name = "OptionRetention"
+)
+
+// Full and short names for google.protobuf.FieldOptions.OptionTargetType.
+const (
+ FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType"
+ FieldOptions_OptionTargetType_enum_name = "OptionTargetType"
+)
+
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -590,20 +620,23 @@ const (
// Field names for google.protobuf.EnumOptions.
const (
- EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
- EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
- EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
+ EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias"
+ EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated"
+ EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
+ EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
- EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
- EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
- EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
+ EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
+ EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
+ EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts"
+ EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
)
// Field numbers for google.protobuf.EnumOptions.
const (
- EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
- EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
- EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
+ EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2
+ EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3
+ EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6
+ EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
// Names for google.protobuf.EnumValueOptions.
@@ -813,11 +846,13 @@ const (
GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file"
GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin"
GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end"
+ GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic"
GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path"
GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file"
GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin"
GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end"
+ GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic"
)
// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation.
@@ -826,4 +861,11 @@ const (
GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2
GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3
GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4
+ GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5
+)
+
+// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic.
+const (
+ GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic"
+ GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic"
)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 11a6128b..185ef2ef 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter {
default:
return newSingularConverter(t, fd)
}
- panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
}
var (
diff --git a/vendor/google.golang.org/protobuf/internal/msgfmt/format.go b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go
new file mode 100644
index 00000000..a319550f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/msgfmt/format.go
@@ -0,0 +1,261 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package msgfmt implements a text marshaler combining the desirable features
+// of both the JSON and proto text formats.
+// It is optimized for human readability and has no associated deserializer.
+package msgfmt
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/detrand"
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// Format returns a formatted string for the message.
+func Format(m proto.Message) string {
+ return string(appendMessage(nil, m.ProtoReflect()))
+}
+
+// FormatValue returns a formatted string for an arbitrary value.
+func FormatValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) string {
+ return string(appendValue(nil, v, fd))
+}
+
+func appendValue(b []byte, v protoreflect.Value, fd protoreflect.FieldDescriptor) []byte {
+ switch v := v.Interface().(type) {
+ case nil:
+ return append(b, ""...)
+ case bool, int32, int64, uint32, uint64, float32, float64:
+ return append(b, fmt.Sprint(v)...)
+ case string:
+ return append(b, strconv.Quote(string(v))...)
+ case []byte:
+ return append(b, strconv.Quote(string(v))...)
+ case protoreflect.EnumNumber:
+ return appendEnum(b, v, fd)
+ case protoreflect.Message:
+ return appendMessage(b, v)
+ case protoreflect.List:
+ return appendList(b, v, fd)
+ case protoreflect.Map:
+ return appendMap(b, v, fd)
+ default:
+ panic(fmt.Sprintf("invalid type: %T", v))
+ }
+}
+
+func appendEnum(b []byte, v protoreflect.EnumNumber, fd protoreflect.FieldDescriptor) []byte {
+ if fd != nil {
+ if ev := fd.Enum().Values().ByNumber(v); ev != nil {
+ return append(b, ev.Name()...)
+ }
+ }
+ return strconv.AppendInt(b, int64(v), 10)
+}
+
+func appendMessage(b []byte, m protoreflect.Message) []byte {
+ if b2 := appendKnownMessage(b, m); b2 != nil {
+ return b2
+ }
+
+ b = append(b, '{')
+ order.RangeFields(m, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ b = append(b, fd.TextName()...)
+ b = append(b, ':')
+ b = appendValue(b, v, fd)
+ b = append(b, delim()...)
+ return true
+ })
+ b = appendUnknown(b, m.GetUnknown())
+ b = bytes.TrimRight(b, delim())
+ b = append(b, '}')
+ return b
+}
+
+var protocmpMessageType = reflect.TypeOf(map[string]interface{}(nil))
+
+func appendKnownMessage(b []byte, m protoreflect.Message) []byte {
+ md := m.Descriptor()
+ fds := md.Fields()
+ switch md.FullName() {
+ case genid.Any_message_fullname:
+ var msgVal protoreflect.Message
+ url := m.Get(fds.ByNumber(genid.Any_TypeUrl_field_number)).String()
+ if v := reflect.ValueOf(m); v.Type().ConvertibleTo(protocmpMessageType) {
+ // For protocmp.Message, directly obtain the sub-message value
+ // which is stored in structured form, rather than as raw bytes.
+ m2 := v.Convert(protocmpMessageType).Interface().(map[string]interface{})
+ v, ok := m2[string(genid.Any_Value_field_name)].(proto.Message)
+ if !ok {
+ return nil
+ }
+ msgVal = v.ProtoReflect()
+ } else {
+ val := m.Get(fds.ByNumber(genid.Any_Value_field_number)).Bytes()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return nil
+ }
+ msgVal = mt.New()
+ err = proto.UnmarshalOptions{AllowPartial: true}.Unmarshal(val, msgVal.Interface())
+ if err != nil {
+ return nil
+ }
+ }
+
+ b = append(b, '{')
+ b = append(b, "["+url+"]"...)
+ b = append(b, ':')
+ b = appendMessage(b, msgVal)
+ b = append(b, '}')
+ return b
+
+ case genid.Timestamp_message_fullname:
+ secs := m.Get(fds.ByNumber(genid.Timestamp_Seconds_field_number)).Int()
+ nanos := m.Get(fds.ByNumber(genid.Timestamp_Nanos_field_number)).Int()
+ if nanos < 0 || nanos >= 1e9 {
+ return nil
+ }
+ t := time.Unix(secs, nanos).UTC()
+ x := t.Format("2006-01-02T15:04:05.000000000") // RFC 3339
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ return append(b, x+"Z"...)
+
+ case genid.Duration_message_fullname:
+ sign := ""
+ secs := m.Get(fds.ByNumber(genid.Duration_Seconds_field_number)).Int()
+ nanos := m.Get(fds.ByNumber(genid.Duration_Nanos_field_number)).Int()
+ if nanos <= -1e9 || nanos >= 1e9 || (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) {
+ return nil
+ }
+ if secs < 0 || nanos < 0 {
+ sign, secs, nanos = "-", -1*secs, -1*nanos
+ }
+ x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ return append(b, x+"s"...)
+
+ case genid.BoolValue_message_fullname,
+ genid.Int32Value_message_fullname,
+ genid.Int64Value_message_fullname,
+ genid.UInt32Value_message_fullname,
+ genid.UInt64Value_message_fullname,
+ genid.FloatValue_message_fullname,
+ genid.DoubleValue_message_fullname,
+ genid.StringValue_message_fullname,
+ genid.BytesValue_message_fullname:
+ fd := fds.ByNumber(genid.WrapperValue_Value_field_number)
+ return appendValue(b, m.Get(fd), fd)
+ }
+
+ return nil
+}
+
+func appendUnknown(b []byte, raw protoreflect.RawFields) []byte {
+ rs := make(map[protoreflect.FieldNumber][]protoreflect.RawFields)
+ for len(raw) > 0 {
+ num, _, n := protowire.ConsumeField(raw)
+ rs[num] = append(rs[num], raw[:n])
+ raw = raw[n:]
+ }
+
+ var ns []protoreflect.FieldNumber
+ for n := range rs {
+ ns = append(ns, n)
+ }
+ sort.Slice(ns, func(i, j int) bool { return ns[i] < ns[j] })
+
+ for _, n := range ns {
+ var leftBracket, rightBracket string
+ if len(rs[n]) > 1 {
+ leftBracket, rightBracket = "[", "]"
+ }
+
+ b = strconv.AppendInt(b, int64(n), 10)
+ b = append(b, ':')
+ b = append(b, leftBracket...)
+ for _, r := range rs[n] {
+ num, typ, n := protowire.ConsumeTag(r)
+ r = r[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, _ := protowire.ConsumeVarint(r)
+ b = strconv.AppendInt(b, int64(v), 10)
+ case protowire.Fixed32Type:
+ v, _ := protowire.ConsumeFixed32(r)
+ b = append(b, fmt.Sprintf("0x%08x", v)...)
+ case protowire.Fixed64Type:
+ v, _ := protowire.ConsumeFixed64(r)
+ b = append(b, fmt.Sprintf("0x%016x", v)...)
+ case protowire.BytesType:
+ v, _ := protowire.ConsumeBytes(r)
+ b = strconv.AppendQuote(b, string(v))
+ case protowire.StartGroupType:
+ v, _ := protowire.ConsumeGroup(num, r)
+ b = append(b, '{')
+ b = appendUnknown(b, v)
+ b = bytes.TrimRight(b, delim())
+ b = append(b, '}')
+ default:
+ panic(fmt.Sprintf("invalid type: %v", typ))
+ }
+ b = append(b, delim()...)
+ }
+ b = bytes.TrimRight(b, delim())
+ b = append(b, rightBracket...)
+ b = append(b, delim()...)
+ }
+ return b
+}
+
+func appendList(b []byte, v protoreflect.List, fd protoreflect.FieldDescriptor) []byte {
+ b = append(b, '[')
+ for i := 0; i < v.Len(); i++ {
+ b = appendValue(b, v.Get(i), fd)
+ b = append(b, delim()...)
+ }
+ b = bytes.TrimRight(b, delim())
+ b = append(b, ']')
+ return b
+}
+
+func appendMap(b []byte, v protoreflect.Map, fd protoreflect.FieldDescriptor) []byte {
+ b = append(b, '{')
+ order.RangeEntries(v, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ b = appendValue(b, k.Value(), fd.MapKey())
+ b = append(b, ':')
+ b = appendValue(b, v, fd.MapValue())
+ b = append(b, delim()...)
+ return true
+ })
+ b = bytes.TrimRight(b, delim())
+ b = append(b, '}')
+ return b
+}
+
+func delim() string {
+ // Deliberately introduce instability into the message string to
+ // discourage users from depending on it.
+ if detrand.Bool() {
+ return " "
+ }
+ return ", "
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
index fea589c4..61a84d34 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) {
// Unlike strings.Builder, we do not need to copy over the contents
// of the old buffer since our builder provides no API for
// retrieving previously created strings.
- sb.buf = make([]byte, 2*(cap(sb.buf)+n))
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
}
func (sb *Builder) last(n int) string {
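
The fix allocates the replacement buffer with length zero and only the doubled capacity, so later appends start at the beginning of the buffer instead of after a run of zero bytes. A standalone illustration of the length-versus-capacity difference:

package main

import "fmt"

func main() {
	withLen := make([]byte, 8) // len 8: appends land after eight zero bytes
	withLen = append(withLen, 'a')
	fmt.Println(len(withLen)) // 9

	withCap := make([]byte, 0, 8) // len 0, cap 8: appends start at index 0
	withCap = append(withCap, 'a')
	fmt.Println(len(withCap)) // 1
}
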
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index b480c501..f7014cd5 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 28
- Patch = 1
+ Minor = 30
+ Patch = 0
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
index 08d2a46f..ec71e717 100644
--- a/vendor/google.golang.org/protobuf/proto/doc.go
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -5,16 +5,13 @@
// Package proto provides functions operating on protocol buffer messages.
//
// For documentation on protocol buffers in general, see:
-//
-// https://developers.google.com/protocol-buffers
+// https://protobuf.dev.
//
// For a tutorial on using protocol buffers with Go, see:
-//
-// https://developers.google.com/protocol-buffers/docs/gotutorial
+// https://protobuf.dev/getting-started/gotutorial.
//
// For a guide to generated Go protocol buffer code, see:
-//
-// https://developers.google.com/protocol-buffers/docs/reference/go-generated
+// https://protobuf.dev/reference/go/go-generated.
//
// # Binary serialization
//
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 67948dd1..1a0be1b0 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -5,30 +5,39 @@
package proto
import (
- "bytes"
- "math"
"reflect"
- "google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/reflect/protoreflect"
)
-// Equal reports whether two messages are equal.
-// If two messages marshal to the same bytes under deterministic serialization,
-// then Equal is guaranteed to report true.
+// Equal reports whether two messages are equal,
+// by recursively comparing the fields of the message.
//
-// Two messages are equal if they belong to the same message descriptor,
-// have the same set of populated known and extension field values,
-// and the same set of unknown fields values. If either of the top-level
-// messages are invalid, then Equal reports true only if both are invalid.
+// - Bytes fields are equal if they contain identical bytes.
+// Empty bytes (regardless of nil-ness) are considered equal.
//
-// Scalar values are compared with the equivalent of the == operator in Go,
-// except bytes values which are compared using bytes.Equal and
-// floating point values which specially treat NaNs as equal.
-// Message values are compared by recursively calling Equal.
-// Lists are equal if each element value is also equal.
-// Maps are equal if they have the same set of keys, where the pair of values
-// for each key is also equal.
+// - Floating-point fields are equal if they contain the same value.
+// Unlike the == operator, a NaN is equal to another NaN.
+//
+// - Other scalar fields are equal if they contain the same value.
+//
+// - Message fields are equal if they have
+// the same set of populated known and extension field values, and
+// the same set of unknown fields values.
+//
+// - Lists are equal if they are the same length and
+// each corresponding element is equal.
+//
+// - Maps are equal if they have the same set of keys and
+// the corresponding value for each key is equal.
+//
+// An invalid message is not equal to a valid message.
+// An invalid message is only equal to another invalid message of the
+// same type. An invalid message often corresponds to a nil pointer
+// of the concrete message type. For example, (*pb.M)(nil) is not equal
+// to &pb.M{}.
+// If two valid messages marshal to the same bytes under deterministic
+// serialization, then Equal is guaranteed to report true.
func Equal(x, y Message) bool {
if x == nil || y == nil {
return x == nil && y == nil
@@ -42,130 +51,7 @@ func Equal(x, y Message) bool {
if mx.IsValid() != my.IsValid() {
return false
}
- return equalMessage(mx, my)
-}
-
-// equalMessage compares two messages.
-func equalMessage(mx, my protoreflect.Message) bool {
- if mx.Descriptor() != my.Descriptor() {
- return false
- }
-
- nx := 0
- equal := true
- mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
- nx++
- vy := my.Get(fd)
- equal = my.Has(fd) && equalField(fd, vx, vy)
- return equal
- })
- if !equal {
- return false
- }
- ny := 0
- my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
- ny++
- return true
- })
- if nx != ny {
- return false
- }
-
- return equalUnknown(mx.GetUnknown(), my.GetUnknown())
-}
-
-// equalField compares two fields.
-func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
- switch {
- case fd.IsList():
- return equalList(fd, x.List(), y.List())
- case fd.IsMap():
- return equalMap(fd, x.Map(), y.Map())
- default:
- return equalValue(fd, x, y)
- }
-}
-
-// equalMap compares two maps.
-func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool {
- if x.Len() != y.Len() {
- return false
- }
- equal := true
- x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
- vy := y.Get(k)
- equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy)
- return equal
- })
- return equal
-}
-
-// equalList compares two lists.
-func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool {
- if x.Len() != y.Len() {
- return false
- }
- for i := x.Len() - 1; i >= 0; i-- {
- if !equalValue(fd, x.Get(i), y.Get(i)) {
- return false
- }
- }
- return true
-}
-
-// equalValue compares two singular values.
-func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool {
- switch fd.Kind() {
- case protoreflect.BoolKind:
- return x.Bool() == y.Bool()
- case protoreflect.EnumKind:
- return x.Enum() == y.Enum()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind,
- protoreflect.Int64Kind, protoreflect.Sint64Kind,
- protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
- return x.Int() == y.Int()
- case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
- protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
- return x.Uint() == y.Uint()
- case protoreflect.FloatKind, protoreflect.DoubleKind:
- fx := x.Float()
- fy := y.Float()
- if math.IsNaN(fx) || math.IsNaN(fy) {
- return math.IsNaN(fx) && math.IsNaN(fy)
- }
- return fx == fy
- case protoreflect.StringKind:
- return x.String() == y.String()
- case protoreflect.BytesKind:
- return bytes.Equal(x.Bytes(), y.Bytes())
- case protoreflect.MessageKind, protoreflect.GroupKind:
- return equalMessage(x.Message(), y.Message())
- default:
- return x.Interface() == y.Interface()
- }
-}
-
-// equalUnknown compares unknown fields by direct comparison on the raw bytes
-// of each individual field number.
-func equalUnknown(x, y protoreflect.RawFields) bool {
- if len(x) != len(y) {
- return false
- }
- if bytes.Equal([]byte(x), []byte(y)) {
- return true
- }
-
- mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
- my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
- for len(x) > 0 {
- fnum, _, n := protowire.ConsumeField(x)
- mx[fnum] = append(mx[fnum], x[:n]...)
- x = x[n:]
- }
- for len(y) > 0 {
- fnum, _, n := protowire.ConsumeField(y)
- my[fnum] = append(my[fnum], y[:n]...)
- y = y[n:]
- }
- return reflect.DeepEqual(mx, my)
+ vx := protoreflect.ValueOfMessage(mx)
+ vy := protoreflect.ValueOfMessage(my)
+ return vx.Equal(vy)
}
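
proto.Equal now delegates to the new protoreflect.Value.Equal, and the rewritten doc comment spells out the invalid-message and NaN rules. A small sketch of those two cases, using the wrappers well-known type so it stays self-contained:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// NaN compares equal to NaN under proto.Equal, unlike the == operator.
	fmt.Println(proto.Equal(wrapperspb.Double(math.NaN()), wrapperspb.Double(math.NaN()))) // true

	// A typed nil pointer is an invalid message and is not equal to an
	// empty, valid message of the same type.
	fmt.Println(proto.Equal((*wrapperspb.DoubleValue)(nil), &wrapperspb.DoubleValue{})) // false
}
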
diff --git a/vendor/google.golang.org/protobuf/reflect/protopath/path.go b/vendor/google.golang.org/protobuf/reflect/protopath/path.go
new file mode 100644
index 00000000..91562a82
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protopath/path.go
@@ -0,0 +1,122 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protopath provides functionality for
+// representing a sequence of protobuf reflection operations on a message.
+package protopath
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/internal/msgfmt"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// NOTE: The Path and Values are separate types here since there are use cases
+// where you would like to "address" some value in a message with just the path
+// and don't have the value information available.
+//
+// This is different from how "github.com/google/go-cmp/cmp".Path operates,
+// which combines both path and value information together.
+// Since the cmp package itself is the only one ever constructing a cmp.Path,
+// it will always have the value available.
+
+// Path is a sequence of protobuf reflection steps applied to some root
+// protobuf message value to arrive at the current value.
+// The first step must be a Root step.
+type Path []Step
+
+// TODO: Provide a Parse function that parses something similar to or
+// perhaps identical to the output of Path.String.
+
+// Index returns the ith step in the path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// It returns a zero Step value if the index is out-of-bounds.
+func (p Path) Index(i int) Step {
+ if i < 0 {
+ i = len(p) + i
+ }
+ if i < 0 || i >= len(p) {
+ return Step{}
+ }
+ return p[i]
+}
+
+// String returns a structured representation of the path
+// by concatenating the string representation of every path step.
+func (p Path) String() string {
+ var b []byte
+ for _, s := range p {
+ b = s.appendString(b)
+ }
+ return string(b)
+}
+
+// Values is a Path paired with a sequence of values at each step.
+// The lengths of Path and Values must be identical.
+// The first step must be a Root step and
+// the first value must be a concrete message value.
+type Values struct {
+ Path Path
+ Values []protoreflect.Value
+}
+
+// Len reports the length of the path and values.
+// If the path and values have differing length, it returns the minimum length.
+func (p Values) Len() int {
+ n := len(p.Path)
+ if n > len(p.Values) {
+ n = len(p.Values)
+ }
+ return n
+}
+
+// Index returns the ith step and value and supports negative indexing.
+// A negative index starts counting from the tail of the Values such that -1
+// refers to the last pair, -2 refers to the second-to-last pair, and so on.
+func (p Values) Index(i int) (out struct {
+ Step Step
+ Value protoreflect.Value
+}) {
+ // NOTE: This returns a single struct instead of two return values so that
+ // callers can make use of the value in an expression:
+ // vs.Index(i).Value.Interface()
+ n := p.Len()
+ if i < 0 {
+ i = n + i
+ }
+ if i < 0 || i >= n {
+ return out
+ }
+ out.Step = p.Path[i]
+ out.Value = p.Values[i]
+ return out
+}
+
+// String returns a human-readable representation of the path and last value.
+// Do not depend on the output being stable.
+//
+// For example:
+//
+// (path.to.MyMessage).list_field[5].map_field["hello"] = {hello: "world"}
+func (p Values) String() string {
+ n := p.Len()
+ if n == 0 {
+ return ""
+ }
+
+ // Determine the field descriptor associated with the last step.
+ var fd protoreflect.FieldDescriptor
+ last := p.Index(-1)
+ switch last.Step.kind {
+ case FieldAccessStep:
+ fd = last.Step.FieldDescriptor()
+ case MapIndexStep, ListIndexStep:
+ fd = p.Index(-2).Step.FieldDescriptor()
+ }
+
+ // Format the full path with the last value.
+ return fmt.Sprintf("%v = %v", p.Path[:n], msgfmt.FormatValue(last.Value, fd))
+}
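
A minimal sketch of building and printing a Path by hand, using the Timestamp descriptor so no generated test message is needed; the Step constructors appear in step.go below:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protopath"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	md := (&timestamppb.Timestamp{}).ProtoReflect().Descriptor()
	p := protopath.Path{
		protopath.Root(md),
		protopath.FieldAccess(md.Fields().ByName("seconds")),
	}
	fmt.Println(p) // (google.protobuf.Timestamp).seconds
}
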
diff --git a/vendor/google.golang.org/protobuf/reflect/protopath/step.go b/vendor/google.golang.org/protobuf/reflect/protopath/step.go
new file mode 100644
index 00000000..95ae85c5
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protopath/step.go
@@ -0,0 +1,241 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protopath
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/internal/encoding/text"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// StepKind identifies the kind of step operation.
+// Each kind of step corresponds with some protobuf reflection operation.
+type StepKind int
+
+const (
+ invalidStep StepKind = iota
+ // RootStep identifies a step as the Root step operation.
+ RootStep
+ // FieldAccessStep identifies a step as the FieldAccess step operation.
+ FieldAccessStep
+ // UnknownAccessStep identifies a step as the UnknownAccess step operation.
+ UnknownAccessStep
+ // ListIndexStep identifies a step as the ListIndex step operation.
+ ListIndexStep
+ // MapIndexStep identifies a step as the MapIndex step operation.
+ MapIndexStep
+ // AnyExpandStep identifies a step as the AnyExpand step operation.
+ AnyExpandStep
+)
+
+func (k StepKind) String() string {
+ switch k {
+ case invalidStep:
+ return ""
+ case RootStep:
+ return "Root"
+ case FieldAccessStep:
+ return "FieldAccess"
+ case UnknownAccessStep:
+ return "UnknownAccess"
+ case ListIndexStep:
+ return "ListIndex"
+ case MapIndexStep:
+ return "MapIndex"
+ case AnyExpandStep:
+ return "AnyExpand"
+ default:
+ return fmt.Sprintf("", k)
+ }
+}
+
+// Step is a union where only one step operation may be specified at a time.
+// The different kinds of steps are specified by the constants defined for
+// the StepKind type.
+type Step struct {
+ kind StepKind
+ desc protoreflect.Descriptor
+ key protoreflect.Value
+}
+
+// Root indicates the root message that a path is relative to.
+// It should always (and only ever) be the first step in a path.
+func Root(md protoreflect.MessageDescriptor) Step {
+ if md == nil {
+ panic("nil message descriptor")
+ }
+ return Step{kind: RootStep, desc: md}
+}
+
+// FieldAccess describes access of a field within a message.
+// Extension field accesses are also represented using a FieldAccess and
+// must be provided with a protoreflect.FieldDescriptor.
+//
+// Within the context of Values,
+// the type of the previous step value is always a message, and
+// the type of the current step value is determined by the field descriptor.
+func FieldAccess(fd protoreflect.FieldDescriptor) Step {
+ if fd == nil {
+ panic("nil field descriptor")
+ } else if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok && fd.IsExtension() {
+ panic(fmt.Sprintf("extension field %q must implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
+ }
+ return Step{kind: FieldAccessStep, desc: fd}
+}
+
+// UnknownAccess describes access to the unknown fields within a message.
+//
+// Within the context of Values,
+// the type of the previous step value is always a message, and
+// the type of the current step value is always a bytes type.
+func UnknownAccess() Step {
+ return Step{kind: UnknownAccessStep}
+}
+
+// ListIndex describes index of an element within a list.
+//
+// Within the context of Values,
+// the type of the previous previous step value is always a message,
+// the type of the previous step value is always a list, and
+// the type of the current step value is determined by the field descriptor.
+func ListIndex(i int) Step {
+ if i < 0 {
+ panic(fmt.Sprintf("invalid list index: %v", i))
+ }
+ return Step{kind: ListIndexStep, key: protoreflect.ValueOfInt64(int64(i))}
+}
+
+// MapIndex describes index of an entry within a map.
+// The key type is determined by the field descriptor that the map belongs to.
+//
+// Within the context of Values,
+// the type of the previous previous step value is always a message,
+// the type of the previous step value is always a map, and
+// the type of the current step value is determined by the field descriptor.
+func MapIndex(k protoreflect.MapKey) Step {
+ if !k.IsValid() {
+ panic("invalid map index")
+ }
+ return Step{kind: MapIndexStep, key: k.Value()}
+}
+
+// AnyExpand describes expansion of a google.protobuf.Any message into
+// a structured representation of the underlying message.
+//
+// Within the context of Values,
+// the type of the previous step value is always a google.protobuf.Any message, and
+// the type of the current step value is always a message.
+func AnyExpand(md protoreflect.MessageDescriptor) Step {
+ if md == nil {
+ panic("nil message descriptor")
+ }
+ return Step{kind: AnyExpandStep, desc: md}
+}
+
+// MessageDescriptor returns the message descriptor for Root or AnyExpand steps,
+// otherwise it returns nil.
+func (s Step) MessageDescriptor() protoreflect.MessageDescriptor {
+ switch s.kind {
+ case RootStep, AnyExpandStep:
+ return s.desc.(protoreflect.MessageDescriptor)
+ default:
+ return nil
+ }
+}
+
+// FieldDescriptor returns the field descriptor for FieldAccess steps,
+// otherwise it returns nil.
+func (s Step) FieldDescriptor() protoreflect.FieldDescriptor {
+ switch s.kind {
+ case FieldAccessStep:
+ return s.desc.(protoreflect.FieldDescriptor)
+ default:
+ return nil
+ }
+}
+
+// ListIndex returns the list index for ListIndex steps,
+// otherwise it returns 0.
+func (s Step) ListIndex() int {
+ switch s.kind {
+ case ListIndexStep:
+ return int(s.key.Int())
+ default:
+ return 0
+ }
+}
+
+// MapIndex returns the map key for MapIndex steps,
+// otherwise it returns an invalid map key.
+func (s Step) MapIndex() protoreflect.MapKey {
+ switch s.kind {
+ case MapIndexStep:
+ return s.key.MapKey()
+ default:
+ return protoreflect.MapKey{}
+ }
+}
+
+// Kind reports which kind of step this is.
+func (s Step) Kind() StepKind {
+ return s.kind
+}
+
+func (s Step) String() string {
+ return string(s.appendString(nil))
+}
+
+func (s Step) appendString(b []byte) []byte {
+ switch s.kind {
+ case RootStep:
+ b = append(b, '(')
+ b = append(b, s.desc.FullName()...)
+ b = append(b, ')')
+ case FieldAccessStep:
+ b = append(b, '.')
+ if fd := s.desc.(protoreflect.FieldDescriptor); fd.IsExtension() {
+ b = append(b, '(')
+ b = append(b, strings.Trim(fd.TextName(), "[]")...)
+ b = append(b, ')')
+ } else {
+ b = append(b, fd.TextName()...)
+ }
+ case UnknownAccessStep:
+ b = append(b, '.')
+ b = append(b, '?')
+ case ListIndexStep:
+ b = append(b, '[')
+ b = strconv.AppendInt(b, s.key.Int(), 10)
+ b = append(b, ']')
+ case MapIndexStep:
+ b = append(b, '[')
+ switch k := s.key.Interface().(type) {
+ case bool:
+ b = strconv.AppendBool(b, bool(k)) // e.g., "true" or "false"
+ case int32:
+ b = strconv.AppendInt(b, int64(k), 10) // e.g., "-32"
+ case int64:
+ b = strconv.AppendInt(b, int64(k), 10) // e.g., "-64"
+ case uint32:
+ b = strconv.AppendUint(b, uint64(k), 10) // e.g., "32"
+ case uint64:
+ b = strconv.AppendUint(b, uint64(k), 10) // e.g., "64"
+ case string:
+ b = text.AppendString(b, k) // e.g., `"hello, world"`
+ }
+ b = append(b, ']')
+ case AnyExpandStep:
+ b = append(b, '.')
+ b = append(b, '(')
+ b = append(b, s.desc.FullName()...)
+ b = append(b, ')')
+ default:
+ b = append(b, ""...)
+ }
+ return b
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protorange/range.go b/vendor/google.golang.org/protobuf/reflect/protorange/range.go
new file mode 100644
index 00000000..6f4c58bf
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protorange/range.go
@@ -0,0 +1,316 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protorange provides functionality to traverse a message value.
+package protorange
+
+import (
+ "bytes"
+ "errors"
+
+ "google.golang.org/protobuf/internal/genid"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protopath"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+var (
+ // Break breaks traversal of children in the current value.
+ // It has no effect when traversing values that are not composite types
+ // (e.g., messages, lists, and maps).
+ Break = errors.New("break traversal of children in current value")
+
+ // Terminate terminates the entire range operation.
+ // All necessary Pop operations continue to be called.
+ Terminate = errors.New("terminate range operation")
+)
+
+// Range performs a depth-first traversal over reachable values in a message.
+//
+// See Options.Range for details.
+func Range(m protoreflect.Message, f func(protopath.Values) error) error {
+ return Options{}.Range(m, f, nil)
+}
+
+// Options configures traversal of a message value tree.
+type Options struct {
+ // Stable specifies whether to visit message fields and map entries
+ // in a stable ordering. If false, then the ordering is undefined and
+ // may be non-deterministic.
+ //
+ // Message fields are visited in ascending order by field number.
+ // Map entries are visited in ascending order, where
+ // boolean keys are ordered such that false sorts before true,
+ // numeric keys are ordered based on the numeric value, and
+ // string keys are lexicographically ordered by Unicode codepoints.
+ Stable bool
+
+ // Resolver is used for looking up types when expanding google.protobuf.Any
+ // messages. If nil, this defaults to using protoregistry.GlobalTypes.
+ // To prevent expansion of Any messages, pass an empty protoregistry.Types:
+ //
+ // Options{Resolver: (*protoregistry.Types)(nil)}
+ //
+ Resolver interface {
+ protoregistry.ExtensionTypeResolver
+ protoregistry.MessageTypeResolver
+ }
+}
+
+// Range performs a depth-first traversal over reachable values in a message.
+// The first push and the last pop are to push/pop a protopath.Root step.
+// If push or pop return any non-nil error (other than Break or Terminate),
+// it terminates the traversal and is returned by Range.
+//
+// The rules for traversing a message are as follows:
+//
+// • For messages, iterate over every populated known and extension field.
+// Each field is preceded by a push of a protopath.FieldAccess step,
+// followed by recursive application of the rules on the field value,
+// and succeeded by a pop of that step.
+// If the message has unknown fields, then push a protopath.UnknownAccess step
+// followed immediately by pop of that step.
+//
+// • As an exception to the above rule, if the current message is a
+// google.protobuf.Any message, expand the underlying message (if resolvable).
+// The expanded message is preceded by a push of a protopath.AnyExpand step,
+// followed by recursive application of the rules on the underlying message,
+// and succeeded by a pop of that step. Mutations to the expanded message
+// are written back to the Any message when popping back out.
+//
+// • For lists, iterate over every element. Each element is preceded by a push
+// of a protopath.ListIndex step, followed by recursive application of the rules
+// on the list element, and succeeded by a pop of that step.
+//
+// • For maps, iterate over every entry. Each entry is preceded by a push
+// of a protopath.MapIndex step, followed by recursive application of the rules
+// on the map entry value, and succeeded by a pop of that step.
+//
+// Mutations should only be made to the last value, otherwise the effects on
+// traversal will be undefined. If the mutation is made to the last value
+// during a push, then the mutation will affect the rest of the traversal.
+// For example, if the last value is currently a message, and the push function
+// populates a few fields in that message, then the newly modified fields
+// will be traversed.
+//
+// The protopath.Values provided to push functions is only valid until the
+// corresponding pop call, and the values provided to a pop call are only valid
+// for the duration of the pop call itself.
+func (o Options) Range(m protoreflect.Message, push, pop func(protopath.Values) error) error {
+ var err error
+ p := new(protopath.Values)
+ if o.Resolver == nil {
+ o.Resolver = protoregistry.GlobalTypes
+ }
+
+ pushStep(p, protopath.Root(m.Descriptor()), protoreflect.ValueOfMessage(m))
+ if push != nil {
+ err = amendError(err, push(*p))
+ }
+ if err == nil {
+ err = o.rangeMessage(p, m, push, pop)
+ }
+ if pop != nil {
+ err = amendError(err, pop(*p))
+ }
+ popStep(p)
+
+ if err == Break || err == Terminate {
+ err = nil
+ }
+ return err
+}
+
+func (o Options) rangeMessage(p *protopath.Values, m protoreflect.Message, push, pop func(protopath.Values) error) (err error) {
+ if ok, err := o.rangeAnyMessage(p, m, push, pop); ok {
+ return err
+ }
+
+ fieldOrder := order.AnyFieldOrder
+ if o.Stable {
+ fieldOrder = order.NumberFieldOrder
+ }
+ order.RangeFields(m, fieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ pushStep(p, protopath.FieldAccess(fd), v)
+ if push != nil {
+ err = amendError(err, push(*p))
+ }
+ if err == nil {
+ switch {
+ case fd.IsMap():
+ err = o.rangeMap(p, fd, v.Map(), push, pop)
+ case fd.IsList():
+ err = o.rangeList(p, fd, v.List(), push, pop)
+ case fd.Message() != nil:
+ err = o.rangeMessage(p, v.Message(), push, pop)
+ }
+ }
+ if pop != nil {
+ err = amendError(err, pop(*p))
+ }
+ popStep(p)
+ return err == nil
+ })
+
+ if b := m.GetUnknown(); len(b) > 0 && err == nil {
+ pushStep(p, protopath.UnknownAccess(), protoreflect.ValueOfBytes(b))
+ if push != nil {
+ err = amendError(err, push(*p))
+ }
+ if pop != nil {
+ err = amendError(err, pop(*p))
+ }
+ popStep(p)
+ }
+
+ if err == Break {
+ err = nil
+ }
+ return err
+}
+
+func (o Options) rangeAnyMessage(p *protopath.Values, m protoreflect.Message, push, pop func(protopath.Values) error) (ok bool, err error) {
+ md := m.Descriptor()
+ if md.FullName() != "google.protobuf.Any" {
+ return false, nil
+ }
+
+ fds := md.Fields()
+ url := m.Get(fds.ByNumber(genid.Any_TypeUrl_field_number)).String()
+ val := m.Get(fds.ByNumber(genid.Any_Value_field_number)).Bytes()
+ mt, errFind := o.Resolver.FindMessageByURL(url)
+ if errFind != nil {
+ return false, nil
+ }
+
+ // Unmarshal the raw encoded message value into a structured message value.
+ m2 := mt.New()
+ errUnmarshal := proto.UnmarshalOptions{
+ Merge: true,
+ AllowPartial: true,
+ Resolver: o.Resolver,
+ }.Unmarshal(val, m2.Interface())
+ if errUnmarshal != nil {
+ // If the underlying message cannot be unmarshaled,
+ // then just treat this as a normal message type.
+ return false, nil
+ }
+
+ // Marshal Any before ranging to detect possible mutations.
+ b1, errMarshal := proto.MarshalOptions{
+ AllowPartial: true,
+ Deterministic: true,
+ }.Marshal(m2.Interface())
+ if errMarshal != nil {
+ return true, errMarshal
+ }
+
+ pushStep(p, protopath.AnyExpand(m2.Descriptor()), protoreflect.ValueOfMessage(m2))
+ if push != nil {
+ err = amendError(err, push(*p))
+ }
+ if err == nil {
+ err = o.rangeMessage(p, m2, push, pop)
+ }
+ if pop != nil {
+ err = amendError(err, pop(*p))
+ }
+ popStep(p)
+
+ // Marshal Any after ranging to detect possible mutations.
+ b2, errMarshal := proto.MarshalOptions{
+ AllowPartial: true,
+ Deterministic: true,
+ }.Marshal(m2.Interface())
+ if errMarshal != nil {
+ return true, errMarshal
+ }
+
+ // Mutations detected, write the new sequence of bytes to the Any message.
+ if !bytes.Equal(b1, b2) {
+ m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(b2))
+ }
+
+ if err == Break {
+ err = nil
+ }
+ return true, err
+}
+
+func (o Options) rangeList(p *protopath.Values, fd protoreflect.FieldDescriptor, ls protoreflect.List, push, pop func(protopath.Values) error) (err error) {
+ for i := 0; i < ls.Len() && err == nil; i++ {
+ v := ls.Get(i)
+ pushStep(p, protopath.ListIndex(i), v)
+ if push != nil {
+ err = amendError(err, push(*p))
+ }
+ if err == nil && fd.Message() != nil {
+ err = o.rangeMessage(p, v.Message(), push, pop)
+ }
+ if pop != nil {
+ err = amendError(err, pop(*p))
+ }
+ popStep(p)
+ }
+
+ if err == Break {
+ err = nil
+ }
+ return err
+}
+
+func (o Options) rangeMap(p *protopath.Values, fd protoreflect.FieldDescriptor, ms protoreflect.Map, push, pop func(protopath.Values) error) (err error) {
+ keyOrder := order.AnyKeyOrder
+ if o.Stable {
+ keyOrder = order.GenericKeyOrder
+ }
+ order.RangeEntries(ms, keyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ pushStep(p, protopath.MapIndex(k), v)
+ if push != nil {
+ err = amendError(err, push(*p))
+ }
+ if err == nil && fd.MapValue().Message() != nil {
+ err = o.rangeMessage(p, v.Message(), push, pop)
+ }
+ if pop != nil {
+ err = amendError(err, pop(*p))
+ }
+ popStep(p)
+ return err == nil
+ })
+
+ if err == Break {
+ err = nil
+ }
+ return err
+}
+
+func pushStep(p *protopath.Values, s protopath.Step, v protoreflect.Value) {
+ p.Path = append(p.Path, s)
+ p.Values = append(p.Values, v)
+}
+
+func popStep(p *protopath.Values) {
+ p.Path = p.Path[:len(p.Path)-1]
+ p.Values = p.Values[:len(p.Values)-1]
+}
+
+// amendError amends the previous error with the current error if it is
+// considered more serious. The precedence order for errors is:
+//
+// nil < Break < Terminate < previous non-nil < current non-nil
+func amendError(prev, curr error) error {
+ switch {
+ case curr == nil:
+ return prev
+ case curr == Break && prev != nil:
+ return prev
+ case curr == Terminate && prev != nil && prev != Break:
+ return prev
+ default:
+ return curr
+ }
+}
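
A short sketch of the simple Range entry point, printing every reachable value in a populated Timestamp; the paths and values come from the protopath package above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protopath"
	"google.golang.org/protobuf/reflect/protorange"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	m := timestamppb.Now()
	protorange.Range(m.ProtoReflect(), func(v protopath.Values) error {
		fmt.Println(v) // e.g. (google.protobuf.Timestamp).seconds = 1700000000
		return nil
	})
}
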
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index b03c1223..54ce326d 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
case 12:
b = p.appendSingularField(b, "syntax", nil)
+ case 13:
+ b = p.appendSingularField(b, "edition", nil)
}
return b
}
@@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte {
b = p.appendSingularField(b, "deprecated", nil)
case 7:
b = p.appendSingularField(b, "map_entry", nil)
+ case 11:
+ b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte {
b = p.appendSingularField(b, "allow_alias", nil)
case 3:
b = p.appendSingularField(b, "deprecated", nil)
+ case 6:
+ b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -345,10 +351,18 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
b = p.appendSingularField(b, "jstype", nil)
case 5:
b = p.appendSingularField(b, "lazy", nil)
+ case 15:
+ b = p.appendSingularField(b, "unverified_lazy", nil)
case 3:
b = p.appendSingularField(b, "deprecated", nil)
case 10:
b = p.appendSingularField(b, "weak", nil)
+ case 16:
+ b = p.appendSingularField(b, "debug_redact", nil)
+ case 17:
+ b = p.appendSingularField(b, "retention", nil)
+ case 18:
+ b = p.appendSingularField(b, "target", nil)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
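
For orientation, these generated append helpers are what back SourcePath.String. A small illustrative sketch (independent of this diff) of the rendering they produce, using the same [4, 3, 2, 7, 1] path that the SourceCodeInfo comments in descriptor.pb.go below walk through:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// Field 4 of FileDescriptorProto (message_type, index 3), then field 2 of
	// DescriptorProto (field, index 7), then field 1 of FieldDescriptorProto (name).
	p := protoreflect.SourcePath{4, 3, 2, 7, 1}
	fmt.Println(p) // .message_type[3].field[7].name
}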
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index f3198107..37601b78 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -148,7 +148,7 @@ type Message interface {
// be preserved in marshaling or other operations.
IsValid() bool
- // ProtoMethods returns optional fast-path implementions of various operations.
+ // ProtoMethods returns optional fast-path implementations of various operations.
// This method may return nil.
//
// The returned methods type is identical to
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
new file mode 100644
index 00000000..59165254
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
@@ -0,0 +1,168 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+)
+
+// Equal reports whether v1 and v2 are recursively equal.
+//
+// - Values of different types are always unequal.
+//
+// - Bytes values are equal if they contain identical bytes.
+// Empty bytes (regardless of nil-ness) are considered equal.
+//
+// - Floating point values are equal if they contain the same value.
+// Unlike the == operator, a NaN is equal to another NaN.
+//
+// - Enums are equal if they contain the same number.
+// Since Value does not contain an enum descriptor,
+// enum values do not consider the type of the enum.
+//
+// - Other scalar values are equal if they contain the same value.
+//
+// - Message values are equal if they belong to the same message descriptor,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// - Lists are equal if they are the same length and
+// each corresponding element is equal.
+//
+// - Maps are equal if they have the same set of keys and
+// the corresponding value for each key is equal.
+func (v1 Value) Equal(v2 Value) bool {
+ return equalValue(v1, v2)
+}
+
+func equalValue(x, y Value) bool {
+ eqType := x.typ == y.typ
+ switch x.typ {
+ case nilType:
+ return eqType
+ case boolType:
+ return eqType && x.Bool() == y.Bool()
+ case int32Type, int64Type:
+ return eqType && x.Int() == y.Int()
+ case uint32Type, uint64Type:
+ return eqType && x.Uint() == y.Uint()
+ case float32Type, float64Type:
+ return eqType && equalFloat(x.Float(), y.Float())
+ case stringType:
+ return eqType && x.String() == y.String()
+ case bytesType:
+ return eqType && bytes.Equal(x.Bytes(), y.Bytes())
+ case enumType:
+ return eqType && x.Enum() == y.Enum()
+ default:
+ switch x := x.Interface().(type) {
+ case Message:
+ y, ok := y.Interface().(Message)
+ return ok && equalMessage(x, y)
+ case List:
+ y, ok := y.Interface().(List)
+ return ok && equalList(x, y)
+ case Map:
+ y, ok := y.Interface().(Map)
+ return ok && equalMap(x, y)
+ default:
+ panic(fmt.Sprintf("unknown type: %T", x))
+ }
+ }
+}
+
+// equalFloat compares two floats, where NaNs are treated as equal.
+func equalFloat(x, y float64) bool {
+ if math.IsNaN(x) || math.IsNaN(y) {
+ return math.IsNaN(x) && math.IsNaN(y)
+ }
+ return x == y
+}
+
+// equalMessage compares two messages.
+func equalMessage(mx, my Message) bool {
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ nx := 0
+ equal := true
+ mx.Range(func(fd FieldDescriptor, vx Value) bool {
+ nx++
+ vy := my.Get(fd)
+ equal = my.Has(fd) && equalValue(vx, vy)
+ return equal
+ })
+ if !equal {
+ return false
+ }
+ ny := 0
+ my.Range(func(fd FieldDescriptor, vx Value) bool {
+ ny++
+ return true
+ })
+ if nx != ny {
+ return false
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+// equalList compares two lists.
+func equalList(x, y List) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ for i := x.Len() - 1; i >= 0; i-- {
+ if !equalValue(x.Get(i), y.Get(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalMap compares two maps.
+func equalMap(x, y Map) bool {
+ if x.Len() != y.Len() {
+ return false
+ }
+ equal := true
+ x.Range(func(k MapKey, vx Value) bool {
+ vy := y.Get(k)
+ equal = y.Has(k) && equalValue(vx, vy)
+ return equal
+ })
+ return equal
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+func equalUnknown(x, y RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[FieldNumber]RawFields)
+ my := make(map[FieldNumber]RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ return reflect.DeepEqual(mx, my)
+}
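
The rules above are easiest to internalize with a few concrete comparisons. A hedged sketch (not part of the vendored file) exercising the new Value.Equal method on scalar values:

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// NaN equals NaN under Value.Equal, unlike the == operator on float64.
	x := protoreflect.ValueOfFloat64(math.NaN())
	y := protoreflect.ValueOfFloat64(math.NaN())
	fmt.Println(x.Equal(y)) // true

	// Empty bytes are equal regardless of nil-ness.
	fmt.Println(protoreflect.ValueOfBytes(nil).Equal(protoreflect.ValueOfBytes([]byte{}))) // true

	// Values of different types are always unequal, even for the same number.
	fmt.Println(protoreflect.ValueOfInt32(1).Equal(protoreflect.ValueOfInt64(1))) // false
}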
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index ca8e28c5..08e5ef73 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -54,11 +54,11 @@ import (
// // Append a 0 to a "repeated int32" field.
// // Since the Value returned by Mutable is guaranteed to alias
// // the source message, modifying the Value modifies the message.
-// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0))
+// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0))
//
// // Assign [0] to a "repeated int32" field by creating a new Value,
// // modifying it, and assigning it.
-// list := message.NewField(fieldDesc).(List)
+// list := message.NewField(fieldDesc).List()
// list.Append(protoreflect.ValueOfInt32(0))
// message.Set(fieldDesc, list)
// // ERROR: Since it is not defined whether Set aliases the source,
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 58352a69..aeb55977 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore"
// It is a variable so that the behavior is easily overridden in another file.
var ignoreConflict = func(d protoreflect.Descriptor, err error) bool {
const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT"
- const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict"
+ const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict"
policy := conflictPolicy
if v := os.Getenv(env); v != "" {
policy = v
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index abe4ab51..dac5671d 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -406,6 +406,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1}
}
+// If set to RETENTION_SOURCE, the option will be omitted from the binary.
+// Note: as of January 2023, support for this is in progress and does not yet
+// have an effect (b/264593489).
+type FieldOptions_OptionRetention int32
+
+const (
+ FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0
+ FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1
+ FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2
+)
+
+// Enum value maps for FieldOptions_OptionRetention.
+var (
+ FieldOptions_OptionRetention_name = map[int32]string{
+ 0: "RETENTION_UNKNOWN",
+ 1: "RETENTION_RUNTIME",
+ 2: "RETENTION_SOURCE",
+ }
+ FieldOptions_OptionRetention_value = map[string]int32{
+ "RETENTION_UNKNOWN": 0,
+ "RETENTION_RUNTIME": 1,
+ "RETENTION_SOURCE": 2,
+ }
+)
+
+func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention {
+ p := new(FieldOptions_OptionRetention)
+ *p = x
+ return p
+}
+
+func (x FieldOptions_OptionRetention) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+}
+
+func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
+}
+
+func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_OptionRetention(num)
+ return nil
+}
+
+// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead.
+func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2}
+}
+
+// This indicates the types of entities that the field may apply to when used
+// as an option. If it is unset, then the field may be freely used as an
+// option on any kind of entity. Note: as of January 2023, support for this is
+// in progress and does not yet have an effect (b/264593489).
+type FieldOptions_OptionTargetType int32
+
+const (
+ FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0
+ FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1
+ FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2
+ FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3
+ FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4
+ FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5
+ FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6
+ FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7
+ FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8
+ FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9
+)
+
+// Enum value maps for FieldOptions_OptionTargetType.
+var (
+ FieldOptions_OptionTargetType_name = map[int32]string{
+ 0: "TARGET_TYPE_UNKNOWN",
+ 1: "TARGET_TYPE_FILE",
+ 2: "TARGET_TYPE_EXTENSION_RANGE",
+ 3: "TARGET_TYPE_MESSAGE",
+ 4: "TARGET_TYPE_FIELD",
+ 5: "TARGET_TYPE_ONEOF",
+ 6: "TARGET_TYPE_ENUM",
+ 7: "TARGET_TYPE_ENUM_ENTRY",
+ 8: "TARGET_TYPE_SERVICE",
+ 9: "TARGET_TYPE_METHOD",
+ }
+ FieldOptions_OptionTargetType_value = map[string]int32{
+ "TARGET_TYPE_UNKNOWN": 0,
+ "TARGET_TYPE_FILE": 1,
+ "TARGET_TYPE_EXTENSION_RANGE": 2,
+ "TARGET_TYPE_MESSAGE": 3,
+ "TARGET_TYPE_FIELD": 4,
+ "TARGET_TYPE_ONEOF": 5,
+ "TARGET_TYPE_ENUM": 6,
+ "TARGET_TYPE_ENUM_ENTRY": 7,
+ "TARGET_TYPE_SERVICE": 8,
+ "TARGET_TYPE_METHOD": 9,
+ }
+)
+
+func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType {
+ p := new(FieldOptions_OptionTargetType)
+ *p = x
+ return p
+}
+
+func (x FieldOptions_OptionTargetType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+}
+
+func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[6]
+}
+
+func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_OptionTargetType(num)
+ return nil
+}
+
+// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead.
+func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3}
+}
+
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP based RPC implementation may choose GET verb for safe
// methods, and PUT verb for idempotent methods instead of the default POST.
@@ -442,11 +588,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
}
func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
}
func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[5]
+ return &file_google_protobuf_descriptor_proto_enumTypes[7]
}
func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -468,6 +614,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0}
}
+// Represents the identified object's effect on the element in the original
+// .proto file.
+type GeneratedCodeInfo_Annotation_Semantic int32
+
+const (
+ // There is no effect or the effect is indescribable.
+ GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0
+ // The element is set or otherwise mutated.
+ GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1
+ // An alias to the element is returned.
+ GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2
+)
+
+// Enum value maps for GeneratedCodeInfo_Annotation_Semantic.
+var (
+ GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{
+ 0: "NONE",
+ 1: "SET",
+ 2: "ALIAS",
+ }
+ GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{
+ "NONE": 0,
+ "SET": 1,
+ "ALIAS": 2,
+ }
+)
+
+func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic {
+ p := new(GeneratedCodeInfo_Annotation_Semantic)
+ *p = x
+ return p
+}
+
+func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+}
+
+func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[8]
+}
+
+func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = GeneratedCodeInfo_Annotation_Semantic(num)
+ return nil
+}
+
+// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead.
+func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0}
+}
+
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
type FileDescriptorSet struct {
@@ -544,8 +754,12 @@ type FileDescriptorProto struct {
// development tools.
SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
// The syntax of the proto file.
- // The supported values are "proto2" and "proto3".
+ // The supported values are "proto2", "proto3", and "editions".
+ //
+ // If `edition` is present, this value must be "editions".
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+ // The edition of the proto file, which is an opaque string.
+ Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"`
}
func (x *FileDescriptorProto) Reset() {
@@ -664,6 +878,13 @@ func (x *FileDescriptorProto) GetSyntax() string {
return ""
}
+func (x *FileDescriptorProto) GetEdition() string {
+ if x != nil && x.Edition != nil {
+ return *x.Edition
+ }
+ return ""
+}
+
// Describes a message type.
type DescriptorProto struct {
state protoimpl.MessageState
@@ -860,7 +1081,6 @@ type FieldDescriptorProto struct {
// For booleans, "true" or "false".
// For strings, contains the default text contents (not escaped in any way).
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
- // TODO(kenton): Base-64 encode?
DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
// If set, gives the index of a oneof in the containing type's oneof_decl
// list. This field is a member of that oneof.
@@ -1382,22 +1602,22 @@ type FileOptions struct {
// inappropriate because proto packages do not normally start with backwards
// domain names.
JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
- // If set, all the classes from the .proto file are wrapped in a single
- // outer class with the given name. This applies to both Proto1
- // (equivalent to the old "--one_java_file" option) and Proto2 (where
- // a .proto always translates to a single class, but you may want to
- // explicitly choose the class name).
+ // Controls the name of the wrapper Java class generated for the .proto file.
+ // That class will always contain the .proto file's getDescriptor() method as
+ // well as any top-level extensions defined in the .proto file.
+ // If java_multiple_files is disabled, then all the other classes from the
+ // .proto file will be nested inside the single wrapper outer class.
JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
- // If set true, then the Java code generator will generate a separate .java
+ // If enabled, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
- // file. Thus, these types will *not* be nested inside the outer class
- // named by java_outer_classname. However, the outer class will still be
+ // file. Thus, these types will *not* be nested inside the wrapper class
+ // named by java_outer_classname. However, the wrapper class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
// This option does nothing.
//
- // Deprecated: Do not use.
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
// If set true, then the Java2 code generator will generate code that
// throws an exception whenever an attempt is made to assign a non-UTF-8
@@ -1531,7 +1751,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool {
return Default_FileOptions_JavaMultipleFiles
}
-// Deprecated: Do not use.
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool {
if x != nil && x.JavaGenerateEqualsAndHash != nil {
return *x.JavaGenerateEqualsAndHash
@@ -1670,10 +1890,12 @@ type MessageOptions struct {
// efficient, has fewer features, and is more complicated.
//
// The message must be defined exactly as follows:
- // message Foo {
- // option message_set_wire_format = true;
- // extensions 4 to max;
- // }
+ //
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ //
// Note that the message cannot have any defined fields; MessageSets only
// have extensions.
//
@@ -1692,28 +1914,44 @@ type MessageOptions struct {
// for the message, or it will be completely ignored; in the very least,
// this is a formalization for deprecating messages.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ //
// Whether the message is an automatically generated map entry type for the
// maps field.
//
// For maps fields:
- // map map_field = 1;
+ //
+ // map map_field = 1;
+ //
// The parsed descriptor looks like:
- // message MapFieldEntry {
- // option map_entry = true;
- // optional KeyType key = 1;
- // optional ValueType value = 2;
- // }
- // repeated MapFieldEntry map_field = 1;
+ //
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
// The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
- //
- // NOTE: Do not set the option in .proto files. Always use the maps syntax
- // instead. The option should only be implicitly set by the proto compiler
- // parser.
MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // Enable the legacy handling of JSON field name conflicts. This lowercases
+	// and strips underscores from the fields before comparison in proto3 only.
+ // The new behavior takes `json_name` into account and applies to proto2 as
+ // well.
+ //
+ // This should only be used as a temporary measure against broken builds due
+ // to the change in behavior for JSON field name conflicts.
+ //
+ // TODO(b/261750190) This is legacy behavior we plan to remove once downstream
+ // teams have had time to migrate.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+ DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -1785,6 +2023,14 @@ func (x *MessageOptions) GetMapEntry() bool {
return false
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
+ if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil {
+ return *x.DeprecatedLegacyJsonFieldConflicts
+ }
+ return false
+}
+
func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -1838,7 +2084,6 @@ type FieldOptions struct {
// call from multiple threads concurrently, while non-const methods continue
// to require exclusive access.
//
- //
// Note that implementations may choose not to check required fields within
// a lazy sub-message. That is, calling IsInitialized() on the outer message
// may return true even if the inner message has missing required fields.
@@ -1849,7 +2094,14 @@ type FieldOptions struct {
// implementation must either *always* check its required fields, or *never*
// check its required fields, regardless of whether or not the message has
// been parsed.
+ //
+ // As of May 2022, lazy verifies the contents of the byte stream during
+ // parsing. An invalid byte stream will cause the overall parsing to fail.
Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // unverified_lazy does no correctness checks on the byte stream. This should
+ // only be used where lazy with verification is prohibitive for performance
+ // reasons.
+ UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"`
// Is this field deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for accessors, or it will be completely ignored; in the very least, this
@@ -1857,17 +2109,24 @@ type FieldOptions struct {
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// For Google-internal migration only. Do not use.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // Indicate that the field value should not be printed out when using debug
+ // formats, e.g. when the field contains sensitive credentials.
+ DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+ Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
+ Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
// Default values for FieldOptions fields.
const (
- Default_FieldOptions_Ctype = FieldOptions_STRING
- Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL
- Default_FieldOptions_Lazy = bool(false)
- Default_FieldOptions_Deprecated = bool(false)
- Default_FieldOptions_Weak = bool(false)
+ Default_FieldOptions_Ctype = FieldOptions_STRING
+ Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL
+ Default_FieldOptions_Lazy = bool(false)
+ Default_FieldOptions_UnverifiedLazy = bool(false)
+ Default_FieldOptions_Deprecated = bool(false)
+ Default_FieldOptions_Weak = bool(false)
+ Default_FieldOptions_DebugRedact = bool(false)
)
func (x *FieldOptions) Reset() {
@@ -1930,6 +2189,13 @@ func (x *FieldOptions) GetLazy() bool {
return Default_FieldOptions_Lazy
}
+func (x *FieldOptions) GetUnverifiedLazy() bool {
+ if x != nil && x.UnverifiedLazy != nil {
+ return *x.UnverifiedLazy
+ }
+ return Default_FieldOptions_UnverifiedLazy
+}
+
func (x *FieldOptions) GetDeprecated() bool {
if x != nil && x.Deprecated != nil {
return *x.Deprecated
@@ -1944,6 +2210,27 @@ func (x *FieldOptions) GetWeak() bool {
return Default_FieldOptions_Weak
}
+func (x *FieldOptions) GetDebugRedact() bool {
+ if x != nil && x.DebugRedact != nil {
+ return *x.DebugRedact
+ }
+ return Default_FieldOptions_DebugRedact
+}
+
+func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention {
+ if x != nil && x.Retention != nil {
+ return *x.Retention
+ }
+ return FieldOptions_RETENTION_UNKNOWN
+}
+
+func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType {
+ if x != nil && x.Target != nil {
+ return *x.Target
+ }
+ return FieldOptions_TARGET_TYPE_UNKNOWN
+}
+
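
The new retention and target options surface in the Go API as the optional getters above, both defaulting to their *_UNKNOWN value when unset. A hedged sketch (not part of the generated file) of how a tool might consult them; inspectFieldOptions and its fd parameter are hypothetical:

package example

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// inspectFieldOptions reports how an extension field is scoped, using the
// retention and target options introduced above.
func inspectFieldOptions(fd protoreflect.FieldDescriptor) {
	opts, ok := fd.Options().(*descriptorpb.FieldOptions)
	if !ok || opts == nil {
		return
	}
	if opts.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE {
		fmt.Println(fd.FullName(), "is declared with source retention (intended to be omitted from binary descriptors)")
	}
	if opts.GetTarget() == descriptorpb.FieldOptions_TARGET_TYPE_FILE {
		fmt.Println(fd.FullName(), "is intended only as a file-level option")
	}
}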
func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2014,6 +2301,15 @@ type EnumOptions struct {
// for the enum, or it will be completely ignored; in the very least, this
// is a formalization for deprecating enums.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enable the legacy handling of JSON field name conflicts. This lowercases
+	// and strips underscores from the fields before comparison in proto3 only.
+ // The new behavior takes `json_name` into account and applies to proto2 as
+ // well.
+ // TODO(b/261750190) Remove this legacy behavior once downstream teams have
+ // had time to migrate.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+ DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
}
@@ -2069,6 +2365,14 @@ func (x *EnumOptions) GetDeprecated() bool {
return Default_EnumOptions_Deprecated
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
+func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
+ if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil {
+ return *x.DeprecatedLegacyJsonFieldConflicts
+ }
+ return false
+}
+
func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2399,43 +2703,48 @@ type SourceCodeInfo struct {
// tools.
//
// For example, say we have a file like:
- // message Foo {
- // optional string foo = 1;
- // }
+ //
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ //
// Let's look at just the field definition:
- // optional string foo = 1;
- // ^ ^^ ^^ ^ ^^^
- // a bc de f ghi
+ //
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ //
// We have the following locations:
- // span path represents
- // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
- // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
- // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
- // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
- // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
//
// Notes:
- // - A location may refer to a repeated field itself (i.e. not to any
- // particular index within it). This is used whenever a set of elements are
- // logically enclosed in a single code segment. For example, an entire
- // extend block (possibly containing multiple extension definitions) will
- // have an outer location whose path refers to the "extensions" repeated
- // field without an index.
- // - Multiple locations may have the same path. This happens when a single
- // logical declaration is spread out across multiple places. The most
- // obvious example is the "extend" block again -- there may be multiple
- // extend blocks in the same scope, each of which will have the same path.
- // - A location's span is not always a subset of its parent's span. For
- // example, the "extendee" of an extension declaration appears at the
- // beginning of the "extend" block and is shared by all extensions within
- // the block.
- // - Just because a location's span is a subset of some other location's span
- // does not mean that it is a descendant. For example, a "group" defines
- // both a type and a field in a single declaration. Thus, the locations
- // corresponding to the type and field and their components will overlap.
- // - Code which tries to interpret locations should probably be designed to
- // ignore those that it doesn't understand, as more types of locations could
- // be recorded in the future.
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements are
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
}
@@ -2715,8 +3024,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
-// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
-// "foo.(bar.baz).qux".
+// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents
+// "foo.(bar.baz).moo".
type UninterpretedOption_NamePart struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2781,23 +3090,34 @@ type SourceCodeInfo_Location struct {
// location.
//
// Each element is a field number or an index. They form a path from
- // the root FileDescriptorProto to the place where the definition. For
- // example, this path:
- // [ 4, 3, 2, 7, 1 ]
+ // the root FileDescriptorProto to the place where the definition occurs.
+ // For example, this path:
+ //
+ // [ 4, 3, 2, 7, 1 ]
+ //
// refers to:
- // file.message_type(3) // 4, 3
- // .field(7) // 2, 7
- // .name() // 1
+ //
+ // file.message_type(3) // 4, 3
+ // .field(7) // 2, 7
+ // .name() // 1
+ //
// This is because FileDescriptorProto.message_type has field number 4:
- // repeated DescriptorProto message_type = 4;
+ //
+ // repeated DescriptorProto message_type = 4;
+ //
// and DescriptorProto.field has field number 2:
- // repeated FieldDescriptorProto field = 2;
+ //
+ // repeated FieldDescriptorProto field = 2;
+ //
// and FieldDescriptorProto.name has field number 1:
- // optional string name = 1;
+ //
+ // optional string name = 1;
//
// Thus, the above path gives the location of a field name. If we removed
// the last element:
- // [ 4, 3, 2, 7 ]
+ //
+ // [ 4, 3, 2, 7 ]
+ //
// this path refers to the whole field declaration (from the beginning
// of the label to the terminating semicolon).
Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
@@ -2826,34 +3146,34 @@ type SourceCodeInfo_Location struct {
//
// Examples:
//
- // optional int32 foo = 1; // Comment attached to foo.
- // // Comment attached to bar.
- // optional int32 bar = 2;
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
//
- // optional string baz = 3;
- // // Comment attached to baz.
- // // Another line attached to baz.
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
//
- // // Comment attached to qux.
- // //
- // // Another line attached to qux.
- // optional double qux = 4;
+ // // Comment attached to moo.
+ // //
+ // // Another line attached to moo.
+ // optional double moo = 4;
//
- // // Detached comment for corge. This is not leading or trailing comments
- // // to qux or corge because there are blank lines separating it from
- // // both.
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to moo or corge because there are blank lines separating it from
+ // // both.
//
- // // Detached comment for corge paragraph 2.
+ // // Detached comment for corge paragraph 2.
//
- // optional string corge = 5;
- // /* Block comment attached
- // * to corge. Leading asterisks
- // * will be removed. */
- // /* Block comment attached to
- // * grault. */
- // optional int32 grault = 6;
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
//
- // // ignored detached comments.
+ // // ignored detached comments.
LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
@@ -2940,9 +3260,10 @@ type GeneratedCodeInfo_Annotation struct {
// that relates to the identified object.
Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
// Identifies the ending offset in bytes in the generated code that
- // relates to the identified offset. The end offset should be one past
+ // relates to the identified object. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+ Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
}
func (x *GeneratedCodeInfo_Annotation) Reset() {
@@ -3005,6 +3326,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 {
return 0
}
+func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic {
+ if x != nil && x.Semantic != nil {
+ return *x.Semantic
+ }
+ return GeneratedCodeInfo_Annotation_NONE
+}
+
var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
var file_google_protobuf_descriptor_proto_rawDesc = []byte{
@@ -3016,7 +3344,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
- 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
@@ -3054,330 +3382,391 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43,
- 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74,
- 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
- 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74,
- 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
+ 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63,
- 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09,
- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73,
- 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67,
+ 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52,
- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72,
- 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65,
- 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e,
- 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d,
- 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65,
- 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c,
- 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74,
- 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65,
- 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65,
- 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65,
- 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
- 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a,
- 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c,
- 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41,
- 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36,
- 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54,
- 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54,
- 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58,
- 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
- 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a,
- 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10,
- 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10,
- 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34,
- 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
- 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
- 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45,
- 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50,
- 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a,
- 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
- 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61,
- 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
- 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
- 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
- 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91,
- 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
- 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
- 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f,
- 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e,
- 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74,
- 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c,
- 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61,
- 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61,
- 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28,
- 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68,
- 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72,
- 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c,
- 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53,
- 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f,
- 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
- 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
- 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a,
- 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
- 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
- 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a,
+ 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55,
+ 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a,
+ 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22,
+ 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
+ 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06,
+ 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05,
+ 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23,
+ 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64,
+ 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49,
+ 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10,
+ 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06,
+ 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32,
+ 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10,
+ 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
+ 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50,
+ 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41,
+ 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e,
+ 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
+ 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49,
+ 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f,
+ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42,
+ 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a,
+ 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10,
+ 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a,
+ 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72,
+ 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a,
+ 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a,
+ 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e,
+ 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74,
+ 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53,
+ 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a,
+ 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f,
+ 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a,
+ 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66,
+ 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46,
+ 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64,
+ 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52,
+ 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75,
+ 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61,
+ 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f,
+ 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69,
+ 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d,
+ 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b,
+ 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67,
+ 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63,
0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12,
- 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f,
- 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20,
- 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61,
- 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a,
- 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
- 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f,
- 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
- 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
- 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70,
- 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a,
- 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79,
- 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
- 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01,
- 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12,
- 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10,
- 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26,
- 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61,
- 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
- 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f,
- 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11,
+ 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
+ 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13,
+ 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+ 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28,
+ 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
+ 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
+ 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
+ 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
+ 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
+ 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
+ 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
+ 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
+ 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
+ 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
+ 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
+ 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
+ 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+ 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
+ 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
- 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09,
- 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12,
+ 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f,
+ 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54,
+ 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+ 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, 0x03, 0x0a,
+ 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77,
+ 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a,
+ 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e,
+ 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64,
+ 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
+ 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
+ 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
+ 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
+ 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04,
+ 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xb7, 0x08, 0x0a, 0x0c, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a,
+ 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16,
+ 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06,
+ 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52,
- 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61,
- 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b,
- 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52,
- 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c,
- 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
- 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
- 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a,
- 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53,
- 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10,
- 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43,
- 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a,
- 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09,
- 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a,
- 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
- 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f,
- 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53,
+ 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e,
+ 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a,
+ 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c,
+ 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01,
+ 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
+ 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x12, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75,
0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02,
- 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61,
- 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65,
- 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08,
- 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
+ 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
+ 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
+ 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
+ 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
+ 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
+ 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+ 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
+ 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
+ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
+ 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
+ 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
+ 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+ 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+ 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+ 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
+ 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
+ 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+ 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+ 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
+ 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+ 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
+ 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
+ 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
+ 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+ 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+ 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+ 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
- 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
+ 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
@@ -3385,97 +3774,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
- 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
+ 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+ 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a,
+ 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
+ 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74,
+ 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50,
+ 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10,
+ 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c,
+ 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
+ 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+ 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64,
+ 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17,
+ 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49,
+ 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a,
+ 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8,
+ 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
- 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
- 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
- 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
- 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
- 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
- 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
- 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65,
- 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a,
- 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b,
- 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44,
- 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49,
- 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07,
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41,
- 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12,
- 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
- 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65,
- 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65,
- 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62,
- 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b,
- 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73,
- 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27,
- 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
- 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50,
- 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74,
- 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74,
- 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
- 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
- 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
- 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
- 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
- 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
- 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
- 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
- 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
- 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
- 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
- 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
- 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
- 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01,
- 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49,
- 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02,
- 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67,
- 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12,
- 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e,
- 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a,
+ 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76,
+ 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65,
+ 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43,
+ 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01,
+ 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
+ 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05,
+ 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65,
+ 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e,
+ 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65,
+ 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65,
+ 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0,
+ 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
+ 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62,
+ 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69,
+ 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03,
+ 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
+ 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73,
+ 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
+ 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10,
+ 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
@@ -3498,7 +3885,7 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 9)
var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27)
var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
(FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type
@@ -3506,84 +3893,90 @@ var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
(FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode
(FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType
(FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType
- (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel
- (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 16: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption
- (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart
- (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation
+ (FieldOptions_OptionRetention)(0), // 5: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 6: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 7: google.protobuf.MethodOptions.IdempotencyLevel
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 8: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 9: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 10: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 11: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 12: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 13: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 14: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 15: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 16: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 17: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 18: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 19: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 20: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 21: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 22: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 23: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 24: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 25: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 26: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 27: google.protobuf.UninterpretedOption
+ (*SourceCodeInfo)(nil), // 28: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 29: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 30: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 31: google.protobuf.DescriptorProto.ReservedRange
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 32: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*UninterpretedOption_NamePart)(nil), // 33: google.protobuf.UninterpretedOption.NamePart
+ (*SourceCodeInfo_Location)(nil), // 34: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 35: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
- 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 10, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 11, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 15, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 17, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 13, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 19, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 28, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 13, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 13, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 11, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 15, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 30, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 14, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 20, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 31, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 27, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 21, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 22, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 16, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 23, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 32, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 24, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 18, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 25, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 26, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 27, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 27, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
- 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 43, // [43:43] is the sub-list for method output_type
- 43, // [43:43] is the sub-list for method input_type
- 43, // [43:43] is the sub-list for extension type_name
- 43, // [43:43] is the sub-list for extension extendee
- 0, // [0:43] is the sub-list for field type_name
+ 5, // 32: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 6, // 33: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 27, // 34: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 27, // 35: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 27, // 36: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 27, // 37: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 27, // 38: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 7, // 39: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 27, // 40: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 33, // 41: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 34, // 42: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 35, // 43: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 12, // 44: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 8, // 45: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 46, // [46:46] is the sub-list for method output_type
+ 46, // [46:46] is the sub-list for method input_type
+ 46, // [46:46] is the sub-list for extension type_name
+ 46, // [46:46] is the sub-list for extension extendee
+ 0, // [0:46] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -3940,7 +4333,7 @@ func file_google_protobuf_descriptor_proto_init() {
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
- NumEnums: 6,
+ NumEnums: 9,
NumMessages: 27,
NumExtensions: 0,
NumServices: 0,
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
new file mode 100644
index 00000000..f77ef0de
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
@@ -0,0 +1,717 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dynamicpb creates protocol buffer messages using runtime type information.
+package dynamicpb
+
+import (
+ "math"
+
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// enum is a dynamic protoreflect.Enum.
+type enum struct {
+ num protoreflect.EnumNumber
+ typ protoreflect.EnumType
+}
+
+func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() }
+func (e enum) Type() protoreflect.EnumType { return e.typ }
+func (e enum) Number() protoreflect.EnumNumber { return e.num }
+
+// enumType is a dynamic protoreflect.EnumType.
+type enumType struct {
+ desc protoreflect.EnumDescriptor
+}
+
+// NewEnumType creates a new EnumType with the provided descriptor.
+//
+// EnumTypes created by this package are equal if their descriptors are equal.
+// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2).
+//
+// Enum values created by the EnumType are equal if their numbers are equal.
+func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType {
+ return enumType{desc}
+}
+
+func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} }
+func (et enumType) Descriptor() protoreflect.EnumDescriptor { return et.desc }
+
+// extensionType is a dynamic protoreflect.ExtensionType.
+type extensionType struct {
+ desc extensionTypeDescriptor
+}
+
+// A Message is a dynamically constructed protocol buffer message.
+//
+// Message implements the proto.Message interface, and may be used with all
+// standard proto package functions such as Marshal, Unmarshal, and so forth.
+//
+// Message also implements the protoreflect.Message interface. See the protoreflect
+// package documentation for that interface for how to get and set fields and
+// otherwise interact with the contents of a Message.
+//
+// Reflection API functions which construct messages, such as NewField,
+// return new dynamic messages of the appropriate type. Functions which take
+// messages, such as Set for a message-value field, will accept any message
+// with a compatible type.
+//
+// Operations which modify a Message are not safe for concurrent use.
+type Message struct {
+ typ messageType
+ known map[protoreflect.FieldNumber]protoreflect.Value
+ ext map[protoreflect.FieldNumber]protoreflect.FieldDescriptor
+ unknown protoreflect.RawFields
+}
+
+var (
+ _ protoreflect.Message = (*Message)(nil)
+ _ protoreflect.ProtoMessage = (*Message)(nil)
+ _ protoiface.MessageV1 = (*Message)(nil)
+)
+
+// NewMessage creates a new message with the provided descriptor.
+func NewMessage(desc protoreflect.MessageDescriptor) *Message {
+ return &Message{
+ typ: messageType{desc},
+ known: make(map[protoreflect.FieldNumber]protoreflect.Value),
+ ext: make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor),
+ }
+}
+
+// ProtoMessage implements the legacy message interface.
+func (m *Message) ProtoMessage() {}
+
+// ProtoReflect implements the protoreflect.ProtoMessage interface.
+func (m *Message) ProtoReflect() protoreflect.Message {
+ return m
+}
+
+// String returns a string representation of a message.
+func (m *Message) String() string {
+ return protoimpl.X.MessageStringOf(m)
+}
+
+// Reset clears the message to be empty, but preserves the dynamic message type.
+func (m *Message) Reset() {
+ m.known = make(map[protoreflect.FieldNumber]protoreflect.Value)
+ m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor)
+ m.unknown = nil
+}
+
+// Descriptor returns the message descriptor.
+func (m *Message) Descriptor() protoreflect.MessageDescriptor {
+ return m.typ.desc
+}
+
+// Type returns the message type.
+func (m *Message) Type() protoreflect.MessageType {
+ return m.typ
+}
+
+// New returns a newly allocated empty message with the same descriptor.
+// See protoreflect.Message for details.
+func (m *Message) New() protoreflect.Message {
+ return m.Type().New()
+}
+
+// Interface returns the message.
+// See protoreflect.Message for details.
+func (m *Message) Interface() protoreflect.ProtoMessage {
+ return m
+}
+
+// ProtoMethods is an internal detail of the protoreflect.Message interface.
+// Users should never call this directly.
+func (m *Message) ProtoMethods() *protoiface.Methods {
+ return nil
+}
+
+// Range visits every populated field in undefined order.
+// See protoreflect.Message for details.
+func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ for num, v := range m.known {
+ fd := m.ext[num]
+ if fd == nil {
+ fd = m.Descriptor().Fields().ByNumber(num)
+ }
+ if !isSet(fd, v) {
+ continue
+ }
+ if !f(fd, v) {
+ return
+ }
+ }
+}
+
+// Has reports whether a field is populated.
+// See protoreflect.Message for details.
+func (m *Message) Has(fd protoreflect.FieldDescriptor) bool {
+ m.checkField(fd)
+ if fd.IsExtension() && m.ext[fd.Number()] != fd {
+ return false
+ }
+ v, ok := m.known[fd.Number()]
+ if !ok {
+ return false
+ }
+ return isSet(fd, v)
+}
+
+// Clear clears a field.
+// See protoreflect.Message for details.
+func (m *Message) Clear(fd protoreflect.FieldDescriptor) {
+ m.checkField(fd)
+ num := fd.Number()
+ delete(m.known, num)
+ delete(m.ext, num)
+}
+
+// Get returns the value of a field.
+// See protoreflect.Message for details.
+func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.checkField(fd)
+ num := fd.Number()
+ if fd.IsExtension() {
+ if fd != m.ext[num] {
+ return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero()
+ }
+ return m.known[num]
+ }
+ if v, ok := m.known[num]; ok {
+ switch {
+ case fd.IsMap():
+ if v.Map().Len() > 0 {
+ return v
+ }
+ case fd.IsList():
+ if v.List().Len() > 0 {
+ return v
+ }
+ default:
+ return v
+ }
+ }
+ switch {
+ case fd.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{desc: fd})
+ case fd.IsList():
+ return protoreflect.ValueOfList(emptyList{desc: fd})
+ case fd.Message() != nil:
+ return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}})
+ case fd.Kind() == protoreflect.BytesKind:
+ return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...))
+ default:
+ return fd.Default()
+ }
+}
+
+// Mutable returns a mutable reference to a repeated, map, or message field.
+// See protoreflect.Message for details.
+func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.checkField(fd)
+ if !fd.IsMap() && !fd.IsList() && fd.Message() == nil {
+ panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName()))
+ }
+ if m.known == nil {
+ panic(errors.New("%v: modification of read-only message", fd.FullName()))
+ }
+ num := fd.Number()
+ if fd.IsExtension() {
+ if fd != m.ext[num] {
+ m.ext[num] = fd
+ m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+ }
+ return m.known[num]
+ }
+ if v, ok := m.known[num]; ok {
+ return v
+ }
+ m.clearOtherOneofFields(fd)
+ m.known[num] = m.NewField(fd)
+ if fd.IsExtension() {
+ m.ext[num] = fd
+ }
+ return m.known[num]
+}
+
+// Set stores a value in a field.
+// See protoreflect.Message for details.
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.checkField(fd)
+ if m.known == nil {
+ panic(errors.New("%v: modification of read-only message", fd.FullName()))
+ }
+ if fd.IsExtension() {
+ isValid := true
+ switch {
+ case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v):
+ isValid = false
+ case fd.IsList():
+ isValid = v.List().IsValid()
+ case fd.IsMap():
+ isValid = v.Map().IsValid()
+ case fd.Message() != nil:
+ isValid = v.Message().IsValid()
+ }
+ if !isValid {
+ panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()))
+ }
+ m.ext[fd.Number()] = fd
+ } else {
+ typecheck(fd, v)
+ }
+ m.clearOtherOneofFields(fd)
+ m.known[fd.Number()] = v
+}
+
+func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) {
+ od := fd.ContainingOneof()
+ if od == nil {
+ return
+ }
+ num := fd.Number()
+ for i := 0; i < od.Fields().Len(); i++ {
+ if n := od.Fields().Get(i).Number(); n != num {
+ delete(m.known, n)
+ }
+ }
+}
+
+// NewField returns a new value assignable to the field of the given descriptor.
+// See protoreflect.Message for details.
+func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.checkField(fd)
+ switch {
+ case fd.IsExtension():
+ return fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+ case fd.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{
+ desc: fd,
+ mapv: make(map[interface{}]protoreflect.Value),
+ })
+ case fd.IsList():
+ return protoreflect.ValueOfList(&dynamicList{desc: fd})
+ case fd.Message() != nil:
+ return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+ default:
+ return fd.Default()
+ }
+}
+
+// WhichOneof reports which field in a oneof is populated, returning nil if none are populated.
+// See protoreflect.Message for details.
+func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ for i := 0; i < od.Fields().Len(); i++ {
+ fd := od.Fields().Get(i)
+ if m.Has(fd) {
+ return fd
+ }
+ }
+ return nil
+}
+
+// GetUnknown returns the raw unknown fields.
+// See protoreflect.Message for details.
+func (m *Message) GetUnknown() protoreflect.RawFields {
+ return m.unknown
+}
+
+// SetUnknown sets the raw unknown fields.
+// See protoreflect.Message for details.
+func (m *Message) SetUnknown(r protoreflect.RawFields) {
+ if m.known == nil {
+ panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName()))
+ }
+ m.unknown = r
+}
+
+// IsValid reports whether the message is valid.
+// See protoreflect.Message for details.
+func (m *Message) IsValid() bool {
+ return m.known != nil
+}
+
+func (m *Message) checkField(fd protoreflect.FieldDescriptor) {
+ if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() {
+ if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok {
+ panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName()))
+ }
+ return
+ }
+ if fd.Parent() == m.Descriptor() {
+ return
+ }
+ fields := m.Descriptor().Fields()
+ index := fd.Index()
+ if index >= fields.Len() || fields.Get(index) != fd {
+ panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName()))
+ }
+}
+
+type messageType struct {
+ desc protoreflect.MessageDescriptor
+}
+
+// NewMessageType creates a new MessageType with the provided descriptor.
+//
+// MessageTypes created by this package are equal if their descriptors are equal.
+// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2).
+func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType {
+ return messageType{desc}
+}
+
+func (mt messageType) New() protoreflect.Message { return NewMessage(mt.desc) }
+func (mt messageType) Zero() protoreflect.Message { return &Message{typ: messageType{mt.desc}} }
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc }
+func (mt messageType) Enum(i int) protoreflect.EnumType {
+ if ed := mt.desc.Fields().Get(i).Enum(); ed != nil {
+ return NewEnumType(ed)
+ }
+ return nil
+}
+func (mt messageType) Message(i int) protoreflect.MessageType {
+ if md := mt.desc.Fields().Get(i).Message(); md != nil {
+ return NewMessageType(md)
+ }
+ return nil
+}
+
+type emptyList struct {
+ desc protoreflect.FieldDescriptor
+}
+
+func (x emptyList) Len() int { return 0 }
+func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) }
+func (x emptyList) Set(n int, v protoreflect.Value) {
+ panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) }
+func (x emptyList) AppendMutable() protoreflect.Value {
+ panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) }
+func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) }
+func (x emptyList) IsValid() bool { return false }
+
+type dynamicList struct {
+ desc protoreflect.FieldDescriptor
+ list []protoreflect.Value
+}
+
+func (x *dynamicList) Len() int {
+ return len(x.list)
+}
+
+func (x *dynamicList) Get(n int) protoreflect.Value {
+ return x.list[n]
+}
+
+func (x *dynamicList) Set(n int, v protoreflect.Value) {
+ typecheckSingular(x.desc, v)
+ x.list[n] = v
+}
+
+func (x *dynamicList) Append(v protoreflect.Value) {
+ typecheckSingular(x.desc, v)
+ x.list = append(x.list, v)
+}
+
+func (x *dynamicList) AppendMutable() protoreflect.Value {
+ if x.desc.Message() == nil {
+ panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName()))
+ }
+ v := x.NewElement()
+ x.Append(v)
+ return v
+}
+
+func (x *dynamicList) Truncate(n int) {
+ // Zero truncated elements to avoid keeping data live.
+ for i := n; i < len(x.list); i++ {
+ x.list[i] = protoreflect.Value{}
+ }
+ x.list = x.list[:n]
+}
+
+func (x *dynamicList) NewElement() protoreflect.Value {
+ return newListEntry(x.desc)
+}
+
+func (x *dynamicList) IsValid() bool {
+ return true
+}
+
+type dynamicMap struct {
+ desc protoreflect.FieldDescriptor
+ mapv map[interface{}]protoreflect.Value
+}
+
+func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] }
+func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) {
+ typecheckSingular(x.desc.MapKey(), k.Value())
+ typecheckSingular(x.desc.MapValue(), v)
+ x.mapv[k.Interface()] = v
+}
+func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() }
+func (x *dynamicMap) Clear(k protoreflect.MapKey) { delete(x.mapv, k.Interface()) }
+func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value {
+ if x.desc.MapValue().Message() == nil {
+ panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName()))
+ }
+ v := x.Get(k)
+ if !v.IsValid() {
+ v = x.NewValue()
+ x.Set(k, v)
+ }
+ return v
+}
+func (x *dynamicMap) Len() int { return len(x.mapv) }
+func (x *dynamicMap) NewValue() protoreflect.Value {
+ if md := x.desc.MapValue().Message(); md != nil {
+ return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect())
+ }
+ return x.desc.MapValue().Default()
+}
+func (x *dynamicMap) IsValid() bool {
+ return x.mapv != nil
+}
+
+func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
+ for k, v := range x.mapv {
+ if !f(protoreflect.ValueOf(k).MapKey(), v) {
+ return
+ }
+ }
+}
+
+func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsMap():
+ return v.Map().Len() > 0
+ case fd.IsList():
+ return v.List().Len() > 0
+ case fd.ContainingOneof() != nil:
+ return true
+ case fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension():
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return v.Bool()
+ case protoreflect.EnumKind:
+ return v.Enum() != 0
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+ return v.Int() != 0
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+ return v.Uint() != 0
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ return v.Float() != 0 || math.Signbit(v.Float())
+ case protoreflect.StringKind:
+ return v.String() != ""
+ case protoreflect.BytesKind:
+ return len(v.Bytes()) > 0
+ }
+ }
+ return true
+}
+
+func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ if err := typeIsValid(fd, v); err != nil {
+ panic(err)
+ }
+}
+
+func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+ switch {
+ case !v.IsValid():
+ return errors.New("%v: assigning invalid value", fd.FullName())
+ case fd.IsMap():
+ if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() {
+ return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+ }
+ return nil
+ case fd.IsList():
+ switch list := v.Interface().(type) {
+ case *dynamicList:
+ if list.desc == fd && list.IsValid() {
+ return nil
+ }
+ case emptyList:
+ if list.desc == fd && list.IsValid() {
+ return nil
+ }
+ }
+ return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+ default:
+ return singularTypeIsValid(fd, v)
+ }
+}
+
+func typecheckSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ if err := singularTypeIsValid(fd, v); err != nil {
+ panic(err)
+ }
+}
+
+func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+ vi := v.Interface()
+ var ok bool
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ _, ok = vi.(bool)
+ case protoreflect.EnumKind:
+ // We could check against the valid set of enum values, but do not.
+ _, ok = vi.(protoreflect.EnumNumber)
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ _, ok = vi.(int32)
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ _, ok = vi.(uint32)
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ _, ok = vi.(int64)
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ _, ok = vi.(uint64)
+ case protoreflect.FloatKind:
+ _, ok = vi.(float32)
+ case protoreflect.DoubleKind:
+ _, ok = vi.(float64)
+ case protoreflect.StringKind:
+ _, ok = vi.(string)
+ case protoreflect.BytesKind:
+ _, ok = vi.([]byte)
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var m protoreflect.Message
+ m, ok = vi.(protoreflect.Message)
+ if ok && m.Descriptor().FullName() != fd.Message().FullName() {
+ return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName())
+ }
+ if dm, ok := vi.(*Message); ok && dm.known == nil {
+ return errors.New("%v: assigning invalid zero-value message", fd.FullName())
+ }
+ }
+ if !ok {
+ return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+ }
+ return nil
+}
+
+func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return protoreflect.ValueOfBool(false)
+ case protoreflect.EnumKind:
+ return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ return protoreflect.ValueOfInt32(0)
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ return protoreflect.ValueOfUint32(0)
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return protoreflect.ValueOfInt64(0)
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return protoreflect.ValueOfUint64(0)
+ case protoreflect.FloatKind:
+ return protoreflect.ValueOfFloat32(0)
+ case protoreflect.DoubleKind:
+ return protoreflect.ValueOfFloat64(0)
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString("")
+ case protoreflect.BytesKind:
+ return protoreflect.ValueOfBytes(nil)
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+ }
+ panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind()))
+}
+
+// NewExtensionType creates a new ExtensionType with the provided descriptor.
+//
+// Dynamic ExtensionTypes with the same descriptor compare as equal. That is,
+// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2).
+//
+// The InterfaceOf and ValueOf methods of the extension type are defined as:
+//
+// func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value {
+// return protoreflect.ValueOf(iv)
+// }
+//
+// func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} {
+// return v.Interface()
+// }
+//
+// The Go type used by the proto.GetExtension and proto.SetExtension functions
+// is determined by these methods, and is therefore equivalent to the Go type
+// used to represent a protoreflect.Value. See the protoreflect.Value
+// documentation for more details.
+func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+ if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok {
+ desc = xt.Descriptor()
+ }
+ return extensionType{extensionTypeDescriptor{desc}}
+}
+
+func (xt extensionType) New() protoreflect.Value {
+ switch {
+ case xt.desc.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{
+ desc: xt.desc,
+ mapv: make(map[interface{}]protoreflect.Value),
+ })
+ case xt.desc.IsList():
+ return protoreflect.ValueOfList(&dynamicList{desc: xt.desc})
+ case xt.desc.Message() != nil:
+ return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message()))
+ default:
+ return xt.desc.Default()
+ }
+}
+
+func (xt extensionType) Zero() protoreflect.Value {
+ switch {
+ case xt.desc.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc})
+ case xt.desc.Cardinality() == protoreflect.Repeated:
+ return protoreflect.ValueOfList(emptyList{desc: xt.desc})
+ case xt.desc.Message() != nil:
+ return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}})
+ default:
+ return xt.desc.Default()
+ }
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+ return xt.desc
+}
+
+func (xt extensionType) ValueOf(iv interface{}) protoreflect.Value {
+ v := protoreflect.ValueOf(iv)
+ typecheck(xt.desc, v)
+ return v
+}
+
+func (xt extensionType) InterfaceOf(v protoreflect.Value) interface{} {
+ typecheck(xt.desc, v)
+ return v.Interface()
+}
+
+func (xt extensionType) IsValidInterface(iv interface{}) bool {
+ return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil
+}
+
+func (xt extensionType) IsValidValue(v protoreflect.Value) bool {
+ return typeIsValid(xt.desc, v) == nil
+}
+
+type extensionTypeDescriptor struct {
+ protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+ return extensionType{xt}
+}
+
+func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+ return xt.ExtensionDescriptor
+}
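The file above introduces the full dynamicpb API; the following is a minimal usage sketch, not part of the vendored file, assuming the generated durationpb package (also vendored in this change) is available to supply a descriptor. It exercises NewMessage, Set, and Get from the code above, together with the standard proto.Marshal and proto.Unmarshal helpers.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/dynamicpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Borrow a message descriptor from a generated type.
	desc := (&durationpb.Duration{}).ProtoReflect().Descriptor()

	// Construct a dynamic message and set a field through protoreflect values.
	msg := dynamicpb.NewMessage(desc)
	seconds := desc.Fields().ByName("seconds")
	msg.Set(seconds, protoreflect.ValueOfInt64(3))
	fmt.Println(msg.Get(seconds).Int()) // 3

	// A dynamic Message satisfies proto.Message, so it round-trips normally.
	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	var d durationpb.Duration
	if err := proto.Unmarshal(b, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.GetSeconds()) // 3
}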
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 8c10797b..a6c7a33f 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -37,8 +37,7 @@
// It is functionally a tuple of the full name of the remote message type and
// the serialized bytes of the remote message value.
//
-//
-// Constructing an Any
+// # Constructing an Any
//
// An Any message containing another message value is constructed using New:
//
@@ -48,8 +47,7 @@
// }
// ... // make use of any
//
-//
-// Unmarshaling an Any
+// # Unmarshaling an Any
//
// With a populated Any message, the underlying message can be serialized into
// a remote concrete message value in a few ways.
@@ -95,8 +93,7 @@
// listed in the case clauses are linked into the Go binary and therefore also
// registered in the global registry.
//
-//
-// Type checking an Any
+// # Type checking an Any
//
// In order to type check whether an Any message represents some other message,
// then use the MessageIs method:
@@ -115,7 +112,6 @@
// }
// ... // make use of m
// }
-//
package anypb
import (
@@ -136,45 +132,49 @@ import (
//
// Example 1: Pack and unpack a message in C++.
//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
//
// Example 2: Pack and unpack a message in Java.
//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := anypb.New(foo)
-// if err != nil {
-// ...
-// }
-// ...
-// foo := &pb.Foo{}
-// if err := any.UnmarshalTo(foo); err != nil {
-// ...
-// }
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+// // or ...
+// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
+// foo = any.unpack(Foo.getDefaultInstance());
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := anypb.New(foo)
+// if err != nil {
+// ...
+// }
+// ...
+// foo := &pb.Foo{}
+// if err := any.UnmarshalTo(foo); err != nil {
+// ...
+// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
@@ -182,35 +182,33 @@ import (
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
+// # JSON
//
-// JSON
-// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": ,
-// "lastName":
-// }
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": ,
+// "lastName":
+// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-//
+// {
+// "@type": "type.googleapis.com/google.protobuf.Duration",
+// "value": "1.212s"
+// }
type Any struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -228,14 +226,14 @@ type Any struct {
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
- // * If no scheme is provided, `https` is assumed.
- // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
- // value in binary format, or produce an error.
- // * Applications are allowed to cache lookup results based on the
- // URL, or have them precompiled into a binary to avoid any
- // lookup. Therefore, binary compatibility needs to be preserved
- // on changes to types. (Use versioned type names to manage
- // breaking changes.)
+ // - If no scheme is provided, `https` is assumed.
+ // - An HTTP GET on the URL must yield a [google.protobuf.Type][]
+ // value in binary format, or produce an error.
+ // - Applications are allowed to cache lookup results based on the
+ // URL, or have them precompiled into a binary to avoid any
+ // lookup. Therefore, binary compatibility needs to be preserved
+ // on changes to types. (Use versioned type names to manage
+ // breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
@@ -243,7 +241,6 @@ type Any struct {
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
- //
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index a583ca2f..df709a8d 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -35,8 +35,7 @@
//
// The Duration message represents a signed span of time.
//
-//
-// Conversion to a Go Duration
+// # Conversion to a Go Duration
//
// The AsDuration method can be used to convert a Duration message to a
// standard Go time.Duration value:
@@ -65,15 +64,13 @@
// the resulting value to the closest representable value (e.g., math.MaxInt64
// for positive overflow and math.MinInt64 for negative overflow).
//
-//
-// Conversion from a Go Duration
+// # Conversion from a Go Duration
//
// The durationpb.New function can be used to construct a Duration message
// from a standard Go time.Duration value:
//
// dur := durationpb.New(d)
// ... // make use of d as a *durationpb.Duration
-//
package durationpb
import (
@@ -96,43 +93,43 @@ import (
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
-// Timestamp start = ...;
-// Timestamp end = ...;
-// Duration duration = ...;
+// Timestamp start = ...;
+// Timestamp end = ...;
+// Duration duration = ...;
//
-// duration.seconds = end.seconds - start.seconds;
-// duration.nanos = end.nanos - start.nanos;
+// duration.seconds = end.seconds - start.seconds;
+// duration.nanos = end.nanos - start.nanos;
//
-// if (duration.seconds < 0 && duration.nanos > 0) {
-// duration.seconds += 1;
-// duration.nanos -= 1000000000;
-// } else if (duration.seconds > 0 && duration.nanos < 0) {
-// duration.seconds -= 1;
-// duration.nanos += 1000000000;
-// }
+// if (duration.seconds < 0 && duration.nanos > 0) {
+// duration.seconds += 1;
+// duration.nanos -= 1000000000;
+// } else if (duration.seconds > 0 && duration.nanos < 0) {
+// duration.seconds -= 1;
+// duration.nanos += 1000000000;
+// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
-// Timestamp start = ...;
-// Duration duration = ...;
-// Timestamp end = ...;
+// Timestamp start = ...;
+// Duration duration = ...;
+// Timestamp end = ...;
//
-// end.seconds = start.seconds + duration.seconds;
-// end.nanos = start.nanos + duration.nanos;
+// end.seconds = start.seconds + duration.seconds;
+// end.nanos = start.nanos + duration.nanos;
//
-// if (end.nanos < 0) {
-// end.seconds -= 1;
-// end.nanos += 1000000000;
-// } else if (end.nanos >= 1000000000) {
-// end.seconds += 1;
-// end.nanos -= 1000000000;
-// }
+// if (end.nanos < 0) {
+// end.seconds -= 1;
+// end.nanos += 1000000000;
+// } else if (end.nanos >= 1000000000) {
+// end.seconds += 1;
+// end.nanos -= 1000000000;
+// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
-// td = datetime.timedelta(days=3, minutes=10)
-// duration = Duration()
-// duration.FromTimedelta(td)
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
//
// # JSON Mapping
//
@@ -143,8 +140,6 @@ import (
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
-//
-//
type Duration struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index c9ae9213..61f69fc1 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -36,8 +36,7 @@
// The Timestamp message represents a timestamp,
// an instant in time since the Unix epoch (January 1st, 1970).
//
-//
-// Conversion to a Go Time
+// # Conversion to a Go Time
//
// The AsTime method can be used to convert a Timestamp message to a
// standard Go time.Time value in UTC:
@@ -59,8 +58,7 @@
// ... // handle error
// }
//
-//
-// Conversion from a Go Time
+// # Conversion from a Go Time
//
// The timestamppb.New function can be used to construct a Timestamp message
// from a standard Go time.Time value:
@@ -72,7 +70,6 @@
//
// ts := timestamppb.Now()
// ... // make use of ts as a *timestamppb.Timestamp
-//
package timestamppb
import (
@@ -101,52 +98,50 @@ import (
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
-// Timestamp timestamp;
-// timestamp.set_seconds(time(NULL));
-// timestamp.set_nanos(0);
+// Timestamp timestamp;
+// timestamp.set_seconds(time(NULL));
+// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
-// struct timeval tv;
-// gettimeofday(&tv, NULL);
+// struct timeval tv;
+// gettimeofday(&tv, NULL);
//
-// Timestamp timestamp;
-// timestamp.set_seconds(tv.tv_sec);
-// timestamp.set_nanos(tv.tv_usec * 1000);
+// Timestamp timestamp;
+// timestamp.set_seconds(tv.tv_sec);
+// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
-// FILETIME ft;
-// GetSystemTimeAsFileTime(&ft);
-// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+// FILETIME ft;
+// GetSystemTimeAsFileTime(&ft);
+// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
-// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
-// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
-// Timestamp timestamp;
-// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
-// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+// Timestamp timestamp;
+// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
-// long millis = System.currentTimeMillis();
-//
-// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
-// .setNanos((int) ((millis % 1000) * 1000000)).build();
+// long millis = System.currentTimeMillis();
//
+// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
// Example 5: Compute Timestamp from Java `Instant.now()`.
//
-// Instant now = Instant.now();
-//
-// Timestamp timestamp =
-// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
-// .setNanos(now.getNano()).build();
+// Instant now = Instant.now();
//
+// Timestamp timestamp =
+// Timestamp.newBuilder().setSeconds(now.getEpochSecond())
+// .setNanos(now.getNano()).build();
//
// Example 6: Compute Timestamp from current time in Python.
//
-// timestamp = Timestamp()
-// timestamp.GetCurrentTime()
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
@@ -174,8 +169,6 @@ import (
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
// ) to obtain a formatter capable of generating timestamps in this format.
-//
-//
type Timestamp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
new file mode 100644
index 00000000..d0bb96a9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go
@@ -0,0 +1,656 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path. The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/compiler/plugin.proto
+
+package pluginpb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+// Sync with code_generator.h.
+type CodeGeneratorResponse_Feature int32
+
+const (
+ CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0
+ CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1
+)
+
+// Enum value maps for CodeGeneratorResponse_Feature.
+var (
+ CodeGeneratorResponse_Feature_name = map[int32]string{
+ 0: "FEATURE_NONE",
+ 1: "FEATURE_PROTO3_OPTIONAL",
+ }
+ CodeGeneratorResponse_Feature_value = map[string]int32{
+ "FEATURE_NONE": 0,
+ "FEATURE_PROTO3_OPTIONAL": 1,
+ }
+)
+
+func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature {
+ p := new(CodeGeneratorResponse_Feature)
+ *p = x
+ return p
+}
+
+func (x CodeGeneratorResponse_Feature) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor()
+}
+
+func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType {
+ return &file_google_protobuf_compiler_plugin_proto_enumTypes[0]
+}
+
+func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = CodeGeneratorResponse_Feature(num)
+ return nil
+}
+
+// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead.
+func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// The version number of protocol compiler.
+type Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
+ Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
+ Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+ // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+ // be empty for mainline stable releases.
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
+}
+
+func (x *Version) Reset() {
+ *x = Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Version) ProtoMessage() {}
+
+func (x *Version) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Version.ProtoReflect.Descriptor instead.
+func (*Version) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Version) GetMajor() int32 {
+ if x != nil && x.Major != nil {
+ return *x.Major
+ }
+ return 0
+}
+
+func (x *Version) GetMinor() int32 {
+ if x != nil && x.Minor != nil {
+ return *x.Minor
+ }
+ return 0
+}
+
+func (x *Version) GetPatch() int32 {
+ if x != nil && x.Patch != nil {
+ return *x.Patch
+ }
+ return 0
+}
+
+func (x *Version) GetSuffix() string {
+ if x != nil && x.Suffix != nil {
+ return *x.Suffix
+ }
+ return ""
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+type CodeGeneratorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The .proto files that were explicitly listed on the command-line. The
+ // code generator should generate code only for these files. Each file's
+ // descriptor will be included in proto_file, below.
+ FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
+ // The generator parameter passed on the command-line.
+ Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+ // FileDescriptorProtos for all files in files_to_generate and everything
+ // they import. The files will appear in topological order, so each file
+ // appears before any file that imports it.
+ //
+ // protoc guarantees that all proto_files will be written after
+ // the fields above, even though this is not technically guaranteed by the
+ // protobuf wire format. This theoretically could allow a plugin to stream
+ // in the FileDescriptorProtos and handle them one by one rather than read
+ // the entire set into memory at once. However, as of this writing, this
+ // is not similarly optimized on protoc's end -- it will store all fields in
+ // memory at once before sending them to the plugin.
+ //
+ // Type names of fields and extensions in the FileDescriptorProto are always
+ // fully qualified.
+ ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
+ // The version number of protocol compiler.
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+}
+
+func (x *CodeGeneratorRequest) Reset() {
+ *x = CodeGeneratorRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CodeGeneratorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CodeGeneratorRequest) ProtoMessage() {}
+
+func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead.
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CodeGeneratorRequest) GetFileToGenerate() []string {
+ if x != nil {
+ return x.FileToGenerate
+ }
+ return nil
+}
+
+func (x *CodeGeneratorRequest) GetParameter() string {
+ if x != nil && x.Parameter != nil {
+ return *x.Parameter
+ }
+ return ""
+}
+
+func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto {
+ if x != nil {
+ return x.ProtoFile
+ }
+ return nil
+}
+
+func (x *CodeGeneratorRequest) GetCompilerVersion() *Version {
+ if x != nil {
+ return x.CompilerVersion
+ }
+ return nil
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+type CodeGeneratorResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Error message. If non-empty, code generation failed. The plugin process
+ // should exit with status code zero even if it reports an error in this way.
+ //
+ // This should be used to indicate errors in .proto files which prevent the
+ // code generator from generating correct code. Errors which indicate a
+ // problem in protoc itself -- such as the input CodeGeneratorRequest being
+ // unparseable -- should be reported by writing a message to stderr and
+ // exiting with a non-zero status code.
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ // A bitmask of supported features that the code generator supports.
+ // This is a bitwise "or" of values from the Feature enum.
+ SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"`
+ File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+}
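Tying this back to the sketch above: the Error contract means a generator that finds a problem in the .proto input reports it through the response and still exits zero, so protoc can relay the message to the user. Something like the following (message text invented) could run before marshalling the response:

```go
// Report bad input via CodeGeneratorResponse.Error and keep the exit
// status at zero; only protoc-level failures (e.g. an unparseable
// request) go to stderr with a non-zero exit.
if len(req.GetFileToGenerate()) == 0 {
	resp.Error = proto.String("no .proto files were listed for generation")
}
```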
+
+func (x *CodeGeneratorResponse) Reset() {
+ *x = CodeGeneratorResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CodeGeneratorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CodeGeneratorResponse) ProtoMessage() {}
+
+func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead.
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CodeGeneratorResponse) GetError() string {
+ if x != nil && x.Error != nil {
+ return *x.Error
+ }
+ return ""
+}
+
+func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 {
+ if x != nil && x.SupportedFeatures != nil {
+ return *x.SupportedFeatures
+ }
+ return 0
+}
+
+func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
+ if x != nil {
+ return x.File
+ }
+ return nil
+}
+
+// Represents a single generated file.
+type CodeGeneratorResponse_File struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The file name, relative to the output directory. The name must not
+ // contain "." or ".." components and must be relative, not be absolute (so,
+ // the file cannot lie outside the output directory). "/" must be used as
+ // the path separator, not "\".
+ //
+ // If the name is omitted, the content will be appended to the previous
+ // file. This allows the generator to break large files into small chunks,
+ // and allows the generated text to be streamed back to protoc so that large
+ // files need not reside completely in memory at one time. Note that as of
+ // this writing protoc does not optimize for this -- it will read the entire
+ // CodeGeneratorResponse before writing files to disk.
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // If non-empty, indicates that the named file should already exist, and the
+ // content here is to be inserted into that file at a defined insertion
+ // point. This feature allows a code generator to extend the output
+ // produced by another code generator. The original generator may provide
+ // insertion points by placing special annotations in the file that look
+ // like:
+ //
+ // @@protoc_insertion_point(NAME)
+ //
+ // The annotation can have arbitrary text before and after it on the line,
+ // which allows it to be placed in a comment. NAME should be replaced with
+ // an identifier naming the point -- this is what other generators will use
+ // as the insertion_point. Code inserted at this point will be placed
+ // immediately above the line containing the insertion point (thus multiple
+ // insertions to the same point will come out in the order they were added).
+ // The double-@ is intended to make it unlikely that the generated code
+ // could contain things that look like insertion points by accident.
+ //
+ // For example, the C++ code generator places the following line in the
+ // .pb.h files that it generates:
+ //
+ // // @@protoc_insertion_point(namespace_scope)
+ //
+ // This line appears within the scope of the file's package namespace, but
+ // outside of any particular class. Another plugin can then specify the
+ // insertion_point "namespace_scope" to generate additional classes or
+ // other declarations that should be placed in this scope.
+ //
+ // Note that if the line containing the insertion point begins with
+ // whitespace, the same whitespace will be added to every line of the
+ // inserted text. This is useful for languages like Python, where
+ // indentation matters. In these languages, the insertion point comment
+ // should be indented the same amount as any inserted code will need to be
+ // in order to work correctly in that context.
+ //
+ // The code generator that generates the initial file and the one which
+ // inserts into it must both run as part of a single invocation of protoc.
+ // Code generators are executed in the order in which they appear on the
+ // command line.
+ //
+ // If |insertion_point| is present, |name| must also be present.
+ InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
+ // The file contents.
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+ // Information describing the file content being inserted. If an insertion
+ // point is used, this information will be appropriately offset and inserted
+ // into the code generation metadata for the generated files.
+ GeneratedCodeInfo *descriptorpb.GeneratedCodeInfo `protobuf:"bytes,16,opt,name=generated_code_info,json=generatedCodeInfo" json:"generated_code_info,omitempty"`
+}
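As a hedged illustration of the insertion-point mechanism documented above (the header name and inserted text are invented, and both generators must run in the same protoc invocation), a follow-up generator could extend the C++ output at its `namespace_scope` point like this:

```go
// Append a declaration to an already-generated header at a named
// insertion point rather than emitting a new file.
resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{
	Name:           proto.String("person.pb.h"),    // must match the file emitted by the first generator
	InsertionPoint: proto.String("namespace_scope"), // declared via @@protoc_insertion_point(namespace_scope)
	Content:        proto.String("class PersonDebugPrinter;\n"),
})
```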
+
+func (x *CodeGeneratorResponse_File) Reset() {
+ *x = CodeGeneratorResponse_File{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CodeGeneratorResponse_File) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CodeGeneratorResponse_File) ProtoMessage() {}
+
+func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead.
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *CodeGeneratorResponse_File) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
+ }
+ return ""
+}
+
+func (x *CodeGeneratorResponse_File) GetInsertionPoint() string {
+ if x != nil && x.InsertionPoint != nil {
+ return *x.InsertionPoint
+ }
+ return ""
+}
+
+func (x *CodeGeneratorResponse_File) GetContent() string {
+ if x != nil && x.Content != nil {
+ return *x.Content
+ }
+ return ""
+}
+
+func (x *CodeGeneratorResponse_File) GetGeneratedCodeInfo() *descriptorpb.GeneratedCodeInfo {
+ if x != nil {
+ return x.GeneratedCodeInfo
+ }
+ return nil
+}
+
+var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor
+
+var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65,
+ 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14,
+ 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d,
+ 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61,
+ 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68,
+ 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64,
+ 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c,
+ 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c,
+ 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69,
+ 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d,
+ 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a,
+ 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12,
+ 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72,
+ 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66,
+ 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70,
+ 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52,
+ 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0xb1, 0x01, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73,
+ 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x52, 0x0a, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x10, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
+ 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
+ 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f,
+ 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52,
+ 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41,
+ 0x4c, 0x10, 0x01, 0x42, 0x72, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69,
+ 0x6c, 0x65, 0x72, 0x42, 0x0c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x73, 0x5a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x70, 0x62, 0xaa, 0x02, 0x18, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x43,
+ 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72,
+}
+
+var (
+ file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once
+ file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc
+)
+
+func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte {
+ file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData)
+ })
+ return file_google_protobuf_compiler_plugin_proto_rawDescData
+}
+
+var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{
+ (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature
+ (*Version)(nil), // 1: google.protobuf.compiler.Version
+ (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest
+ (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse
+ (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File
+ (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto
+ (*descriptorpb.GeneratedCodeInfo)(nil), // 6: google.protobuf.GeneratedCodeInfo
+}
+var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{
+ 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto
+ 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version
+ 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File
+ 6, // 3: google.protobuf.compiler.CodeGeneratorResponse.File.generated_code_info:type_name -> google.protobuf.GeneratedCodeInfo
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_google_protobuf_compiler_plugin_proto_init() }
+func file_google_protobuf_compiler_plugin_proto_init() {
+ if File_google_protobuf_compiler_plugin_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CodeGeneratorRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CodeGeneratorResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CodeGeneratorResponse_File); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs,
+ EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes,
+ MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes,
+ }.Build()
+ File_google_protobuf_compiler_plugin_proto = out.File
+ file_google_protobuf_compiler_plugin_proto_rawDesc = nil
+ file_google_protobuf_compiler_plugin_proto_goTypes = nil
+ file_google_protobuf_compiler_plugin_proto_depIdxs = nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 3b5d4b45..528d2af8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -27,7 +27,7 @@ github.com/btcsuite/btcutil/base58
# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
-# github.com/containerd/cgroups v1.0.4
+# github.com/containerd/cgroups v1.1.0
## explicit; go 1.17
github.com/containerd/cgroups
github.com/containerd/cgroups/stats/v1
@@ -92,7 +92,7 @@ github.com/go-oauth2/oauth2/v4/store
# github.com/go-session/session v3.1.2+incompatible
## explicit
github.com/go-session/session
-# github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0
+# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
## explicit; go 1.13
github.com/go-task/slim-sprig
# github.com/godbus/dbus/v5 v5.1.0
@@ -113,7 +113,7 @@ github.com/golang/glog
## explicit; go 1.11
github.com/golang/mock/mockgen
github.com/golang/mock/mockgen/model
-# github.com/golang/protobuf v1.5.2
+# github.com/golang/protobuf v1.5.3
## explicit; go 1.9
github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
@@ -125,8 +125,8 @@ github.com/golang/protobuf/ptypes/timestamp
# github.com/google/gopacket v1.1.19
## explicit; go 1.12
github.com/google/gopacket/routing
-# github.com/google/pprof v0.0.0-20230111200839-76d1ae5aea2b
-## explicit; go 1.18
+# github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b
+## explicit; go 1.19
github.com/google/pprof/profile
# github.com/google/uuid v1.3.0
## explicit
@@ -167,7 +167,7 @@ github.com/heetch/confita/backend
github.com/heetch/confita/backend/env
github.com/heetch/confita/backend/file
github.com/heetch/confita/backend/flags
-# github.com/huin/goupnp v1.0.3
+# github.com/huin/goupnp v1.1.0
## explicit; go 1.14
github.com/huin/goupnp
github.com/huin/goupnp/dcps/internetgateway1
@@ -176,8 +176,8 @@ github.com/huin/goupnp/httpu
github.com/huin/goupnp/scpd
github.com/huin/goupnp/soap
github.com/huin/goupnp/ssdp
-# github.com/ipfs/go-cid v0.3.2
-## explicit; go 1.18
+# github.com/ipfs/go-cid v0.4.1
+## explicit; go 1.19
github.com/ipfs/go-cid
# github.com/ipfs/go-datastore v0.6.0
## explicit; go 1.17
@@ -234,21 +234,24 @@ github.com/jbenet/goprocess/context
github.com/karlseguin/ccache
# github.com/karlseguin/expect v1.0.8
## explicit; go 1.14
-# github.com/klauspost/compress v1.15.14
-## explicit; go 1.17
+# github.com/klauspost/compress v1.16.4
+## explicit; go 1.18
github.com/klauspost/compress
+github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/klauspost/cpuid/v2 v2.2.3
+# github.com/klauspost/cpuid/v2 v2.2.4
## explicit; go 1.15
github.com/klauspost/cpuid/v2
-# github.com/koron/go-ssdp v0.0.3
-## explicit; go 1.17
+# github.com/koron/go-ssdp v0.0.4
+## explicit; go 1.19
github.com/koron/go-ssdp
+github.com/koron/go-ssdp/internal/multicast
+github.com/koron/go-ssdp/internal/ssdplog
# github.com/libp2p/go-buffer-pool v0.1.0
## explicit; go 1.17
github.com/libp2p/go-buffer-pool
@@ -259,8 +262,8 @@ github.com/libp2p/go-cidranger/net
# github.com/libp2p/go-flow-metrics v0.1.0
## explicit; go 1.17
github.com/libp2p/go-flow-metrics
-# github.com/libp2p/go-libp2p v0.24.2
-## explicit; go 1.18
+# github.com/libp2p/go-libp2p v0.27.8
+## explicit; go 1.19
github.com/libp2p/go-libp2p
github.com/libp2p/go-libp2p/config
github.com/libp2p/go-libp2p/core/canonicallog
@@ -271,8 +274,6 @@ github.com/libp2p/go-libp2p/core/crypto/pb
github.com/libp2p/go-libp2p/core/event
github.com/libp2p/go-libp2p/core/host
github.com/libp2p/go-libp2p/core/internal/catch
-github.com/libp2p/go-libp2p/core/introspection
-github.com/libp2p/go-libp2p/core/introspection/pb
github.com/libp2p/go-libp2p/core/metrics
github.com/libp2p/go-libp2p/core/network
github.com/libp2p/go-libp2p/core/peer
@@ -299,6 +300,7 @@ github.com/libp2p/go-libp2p/p2p/host/pstoremanager
github.com/libp2p/go-libp2p/p2p/host/relaysvc
github.com/libp2p/go-libp2p/p2p/host/resource-manager
github.com/libp2p/go-libp2p/p2p/host/routed
+github.com/libp2p/go-libp2p/p2p/metricshelper
github.com/libp2p/go-libp2p/p2p/muxer/yamux
github.com/libp2p/go-libp2p/p2p/net/connmgr
github.com/libp2p/go-libp2p/p2p/net/nat
@@ -306,8 +308,6 @@ github.com/libp2p/go-libp2p/p2p/net/pnet
github.com/libp2p/go-libp2p/p2p/net/reuseport
github.com/libp2p/go-libp2p/p2p/net/swarm
github.com/libp2p/go-libp2p/p2p/net/upgrader
-github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/pb
-github.com/libp2p/go-libp2p/p2p/protocol/circuitv1/relay
github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/client
github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/pb
github.com/libp2p/go-libp2p/p2p/protocol/circuitv2/proto
@@ -325,8 +325,9 @@ github.com/libp2p/go-libp2p/p2p/transport/quic
github.com/libp2p/go-libp2p/p2p/transport/quicreuse
github.com/libp2p/go-libp2p/p2p/transport/tcp
github.com/libp2p/go-libp2p/p2p/transport/websocket
-# github.com/libp2p/go-libp2p-asn-util v0.2.0
-## explicit; go 1.17
+github.com/libp2p/go-libp2p/p2p/transport/webtransport
+# github.com/libp2p/go-libp2p-asn-util v0.3.0
+## explicit; go 1.19
github.com/libp2p/go-libp2p-asn-util
# github.com/libp2p/go-libp2p-kad-dht v0.20.0
## explicit; go 1.18
@@ -351,6 +352,7 @@ github.com/libp2p/go-libp2p-record/pb
# github.com/libp2p/go-msgio v0.3.0
## explicit; go 1.18
github.com/libp2p/go-msgio
+github.com/libp2p/go-msgio/pbio
github.com/libp2p/go-msgio/protoio
# github.com/libp2p/go-nat v0.1.0
## explicit; go 1.16
@@ -358,58 +360,28 @@ github.com/libp2p/go-nat
# github.com/libp2p/go-netroute v0.2.1
## explicit; go 1.18
github.com/libp2p/go-netroute
-# github.com/libp2p/go-openssl v0.1.0
-## explicit; go 1.17
-github.com/libp2p/go-openssl
-github.com/libp2p/go-openssl/utils
# github.com/libp2p/go-reuseport v0.2.0
## explicit; go 1.17
github.com/libp2p/go-reuseport
# github.com/libp2p/go-yamux/v4 v4.0.0
## explicit; go 1.18
github.com/libp2p/go-yamux/v4
-# github.com/lucas-clemente/quic-go v0.31.1
-## explicit; go 1.18
-github.com/lucas-clemente/quic-go
-github.com/lucas-clemente/quic-go/internal/ackhandler
-github.com/lucas-clemente/quic-go/internal/congestion
-github.com/lucas-clemente/quic-go/internal/flowcontrol
-github.com/lucas-clemente/quic-go/internal/handshake
-github.com/lucas-clemente/quic-go/internal/logutils
-github.com/lucas-clemente/quic-go/internal/protocol
-github.com/lucas-clemente/quic-go/internal/qerr
-github.com/lucas-clemente/quic-go/internal/qtls
-github.com/lucas-clemente/quic-go/internal/utils
-github.com/lucas-clemente/quic-go/internal/utils/linkedlist
-github.com/lucas-clemente/quic-go/internal/wire
-github.com/lucas-clemente/quic-go/logging
-github.com/lucas-clemente/quic-go/qlog
-github.com/lucas-clemente/quic-go/quicvarint
# github.com/mailjet/mailjet-apiv3-go/v3 v3.1.1
## explicit; go 1.13
github.com/mailjet/mailjet-apiv3-go/v3
github.com/mailjet/mailjet-apiv3-go/v3/fixtures
github.com/mailjet/mailjet-apiv3-go/v3/resources
-# github.com/marten-seemann/qtls-go1-18 v0.1.4
-## explicit; go 1.18
-github.com/marten-seemann/qtls-go1-18
-# github.com/marten-seemann/qtls-go1-19 v0.1.2
-## explicit; go 1.19
-github.com/marten-seemann/qtls-go1-19
# github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd
## explicit; go 1.15
github.com/marten-seemann/tcp
-# github.com/mattn/go-isatty v0.0.17
+# github.com/mattn/go-isatty v0.0.18
## explicit; go 1.15
github.com/mattn/go-isatty
-# github.com/mattn/go-pointer v0.0.1
-## explicit
-github.com/mattn/go-pointer
# github.com/matttproud/golang_protobuf_extensions v1.0.4
## explicit; go 1.9
github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/miekg/dns v1.1.50
-## explicit; go 1.14
+# github.com/miekg/dns v1.1.53
+## explicit; go 1.19
github.com/miekg/dns
# github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b
## explicit
@@ -429,7 +401,7 @@ github.com/multiformats/go-base32
# github.com/multiformats/go-base36 v0.2.0
## explicit; go 1.18
github.com/multiformats/go-base36
-# github.com/multiformats/go-multiaddr v0.8.0
+# github.com/multiformats/go-multiaddr v0.9.0
## explicit; go 1.18
github.com/multiformats/go-multiaddr
github.com/multiformats/go-multiaddr/net
@@ -439,11 +411,11 @@ github.com/multiformats/go-multiaddr-dns
# github.com/multiformats/go-multiaddr-fmt v0.1.0
## explicit; go 1.13
github.com/multiformats/go-multiaddr-fmt
-# github.com/multiformats/go-multibase v0.1.1
-## explicit; go 1.17
+# github.com/multiformats/go-multibase v0.2.0
+## explicit; go 1.19
github.com/multiformats/go-multibase
-# github.com/multiformats/go-multicodec v0.7.0
-## explicit; go 1.18
+# github.com/multiformats/go-multicodec v0.8.1
+## explicit; go 1.19
github.com/multiformats/go-multicodec
# github.com/multiformats/go-multihash v0.2.1
## explicit; go 1.17
@@ -455,8 +427,8 @@ github.com/multiformats/go-multihash/register/blake3
github.com/multiformats/go-multihash/register/miniosha256
github.com/multiformats/go-multihash/register/murmur3
github.com/multiformats/go-multihash/register/sha3
-# github.com/multiformats/go-multistream v0.3.3
-## explicit; go 1.17
+# github.com/multiformats/go-multistream v0.4.1
+## explicit; go 1.19
github.com/multiformats/go-multistream
# github.com/multiformats/go-varint v0.0.7
## explicit; go 1.18
@@ -466,7 +438,7 @@ github.com/multiformats/go-varint
github.com/nicksnyder/go-i18n/v2/i18n
github.com/nicksnyder/go-i18n/v2/internal
github.com/nicksnyder/go-i18n/v2/internal/plural
-# github.com/onsi/ginkgo/v2 v2.7.0
+# github.com/onsi/ginkgo/v2 v2.9.2
## explicit; go 1.18
github.com/onsi/ginkgo/v2/config
github.com/onsi/ginkgo/v2/formatter
@@ -517,8 +489,8 @@ github.com/prometheus/client_golang/prometheus/promhttp
# github.com/prometheus/client_model v0.3.0
## explicit; go 1.9
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.39.0
-## explicit; go 1.17
+# github.com/prometheus/common v0.42.0
+## explicit; go 1.18
github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
@@ -527,19 +499,46 @@ github.com/prometheus/common/model
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
+# github.com/quic-go/qpack v0.4.0
+## explicit; go 1.18
+github.com/quic-go/qpack
+# github.com/quic-go/qtls-go1-19 v0.3.3
+## explicit; go 1.19
+github.com/quic-go/qtls-go1-19
+# github.com/quic-go/qtls-go1-20 v0.2.3
+## explicit; go 1.20
+github.com/quic-go/qtls-go1-20
+# github.com/quic-go/quic-go v0.33.0
+## explicit; go 1.19
+github.com/quic-go/quic-go
+github.com/quic-go/quic-go/http3
+github.com/quic-go/quic-go/internal/ackhandler
+github.com/quic-go/quic-go/internal/congestion
+github.com/quic-go/quic-go/internal/flowcontrol
+github.com/quic-go/quic-go/internal/handshake
+github.com/quic-go/quic-go/internal/logutils
+github.com/quic-go/quic-go/internal/protocol
+github.com/quic-go/quic-go/internal/qerr
+github.com/quic-go/quic-go/internal/qtls
+github.com/quic-go/quic-go/internal/utils
+github.com/quic-go/quic-go/internal/utils/linkedlist
+github.com/quic-go/quic-go/internal/wire
+github.com/quic-go/quic-go/logging
+github.com/quic-go/quic-go/qlog
+github.com/quic-go/quic-go/quicvarint
+# github.com/quic-go/webtransport-go v0.5.2
+## explicit; go 1.18
+github.com/quic-go/webtransport-go
# github.com/raulk/go-watchdog v1.3.0
## explicit; go 1.15
github.com/raulk/go-watchdog
# github.com/satori/go.uuid v1.2.0
## explicit
github.com/satori/go.uuid
-# github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572
-## explicit
-github.com/spacemonkeygo/spacelog
# github.com/spaolacci/murmur3 v1.1.0
## explicit
github.com/spaolacci/murmur3
-# github.com/stretchr/testify v1.8.1
+# github.com/stretchr/testify v1.8.2
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
@@ -594,7 +593,7 @@ go.uber.org/dig/internal/digerror
go.uber.org/dig/internal/digreflect
go.uber.org/dig/internal/dot
go.uber.org/dig/internal/graph
-# go.uber.org/fx v1.19.1
+# go.uber.org/fx v1.19.2
## explicit; go 1.18
go.uber.org/fx
go.uber.org/fx/fxevent
@@ -602,7 +601,7 @@ go.uber.org/fx/internal/fxclock
go.uber.org/fx/internal/fxlog
go.uber.org/fx/internal/fxreflect
go.uber.org/fx/internal/lifecycle
-# go.uber.org/multierr v1.9.0
+# go.uber.org/multierr v1.11.0
## explicit; go 1.19
go.uber.org/multierr
# go.uber.org/zap v1.24.0
@@ -633,10 +632,11 @@ golang.org/x/crypto/pbkdf2
golang.org/x/crypto/ripemd160
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/sha3
-# golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4
+# golang.org/x/exp v0.0.0-20230321023759-10a507213a29
## explicit; go 1.18
golang.org/x/exp/constraints
-# golang.org/x/mod v0.8.0
+golang.org/x/exp/slices
+# golang.org/x/mod v0.10.0
## explicit; go 1.17
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
@@ -646,7 +646,6 @@ golang.org/x/mod/semver
## explicit; go 1.17
golang.org/x/net/bpf
golang.org/x/net/context
-golang.org/x/net/context/ctxhttp
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
@@ -658,7 +657,7 @@ golang.org/x/net/ipv4
golang.org/x/net/ipv6
golang.org/x/net/route
golang.org/x/net/trace
-# golang.org/x/oauth2 v0.3.0
+# golang.org/x/oauth2 v0.5.0
## explicit; go 1.17
golang.org/x/oauth2
golang.org/x/oauth2/authhandler
@@ -686,13 +685,15 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.6.0
+# golang.org/x/tools v0.7.0
## explicit; go 1.18
+golang.org/x/tools/cmd/goimports
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/ast/inspector
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/packages
+golang.org/x/tools/go/types/objectpath
golang.org/x/tools/imports
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
@@ -771,8 +772,11 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.28.1
+# google.golang.org/protobuf v1.30.0
## explicit; go 1.11
+google.golang.org/protobuf/cmd/protoc-gen-go
+google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo
+google.golang.org/protobuf/compiler/protogen
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
@@ -790,6 +794,7 @@ google.golang.org/protobuf/internal/filetype
google.golang.org/protobuf/internal/flags
google.golang.org/protobuf/internal/genid
google.golang.org/protobuf/internal/impl
+google.golang.org/protobuf/internal/msgfmt
google.golang.org/protobuf/internal/order
google.golang.org/protobuf/internal/pragma
google.golang.org/protobuf/internal/set
@@ -797,14 +802,18 @@ google.golang.org/protobuf/internal/strs
google.golang.org/protobuf/internal/version
google.golang.org/protobuf/proto
google.golang.org/protobuf/reflect/protodesc
+google.golang.org/protobuf/reflect/protopath
+google.golang.org/protobuf/reflect/protorange
google.golang.org/protobuf/reflect/protoreflect
google.golang.org/protobuf/reflect/protoregistry
google.golang.org/protobuf/runtime/protoiface
google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/dynamicpb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
google.golang.org/protobuf/types/known/timestamppb
+google.golang.org/protobuf/types/pluginpb
# gopkg.in/yaml.v2 v2.4.0
## explicit; go 1.15
gopkg.in/yaml.v2
@@ -814,3 +823,10 @@ gopkg.in/yaml.v3
# lukechampine.com/blake3 v1.1.7
## explicit; go 1.13
lukechampine.com/blake3
+# nhooyr.io/websocket v1.8.7
+## explicit; go 1.13
+nhooyr.io/websocket
+nhooyr.io/websocket/internal/bpool
+nhooyr.io/websocket/internal/errd
+nhooyr.io/websocket/internal/wsjs
+nhooyr.io/websocket/internal/xsync
diff --git a/vendor/nhooyr.io/websocket/.gitignore b/vendor/nhooyr.io/websocket/.gitignore
new file mode 100644
index 00000000..6961e5c8
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/.gitignore
@@ -0,0 +1 @@
+websocket.test
diff --git a/vendor/github.com/mattn/go-pointer/LICENSE b/vendor/nhooyr.io/websocket/LICENSE.txt
similarity index 94%
rename from vendor/github.com/mattn/go-pointer/LICENSE
rename to vendor/nhooyr.io/websocket/LICENSE.txt
index 5794eddc..b5b5fef3 100644
--- a/vendor/github.com/mattn/go-pointer/LICENSE
+++ b/vendor/nhooyr.io/websocket/LICENSE.txt
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+MIT License
-Copyright (c) 2019 Yasuhiro Matsumoto
+Copyright (c) 2018 Anmol Sethi
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/nhooyr.io/websocket/README.md b/vendor/nhooyr.io/websocket/README.md
new file mode 100644
index 00000000..df20c581
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/README.md
@@ -0,0 +1,132 @@
+# websocket
+
+[](https://pkg.go.dev/nhooyr.io/websocket)
+[](https://nhooyrio-websocket-coverage.netlify.app)
+
+websocket is a minimal and idiomatic WebSocket library for Go.
+
+## Install
+
+```bash
+go get nhooyr.io/websocket
+```
+
+## Highlights
+
+- Minimal and idiomatic API
+- First class [context.Context](https://blog.golang.org/context) support
+- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite)
+- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports)
+- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
+- Zero alloc reads and writes
+- Concurrent writes
+- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close)
+- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
+- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
+- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression
+- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm)
+
+## Roadmap
+
+- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4)
+
+## Examples
+
+For a production quality example that demonstrates the complete API, see the
+[echo example](./examples/echo).
+
+For a full stack example, see the [chat example](./examples/chat).
+
+### Server
+
+```go
+http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+ c, err := websocket.Accept(w, r, nil)
+ if err != nil {
+ // ...
+ }
+ defer c.Close(websocket.StatusInternalError, "the sky is falling")
+
+ ctx, cancel := context.WithTimeout(r.Context(), time.Second*10)
+ defer cancel()
+
+ var v interface{}
+ err = wsjson.Read(ctx, c, &v)
+ if err != nil {
+ // ...
+ }
+
+ log.Printf("received: %v", v)
+
+ c.Close(websocket.StatusNormalClosure, "")
+})
+```
+
+### Client
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+defer cancel()
+
+c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil)
+if err != nil {
+ // ...
+}
+defer c.Close(websocket.StatusInternalError, "the sky is falling")
+
+err = wsjson.Write(ctx, c, "hi")
+if err != nil {
+ // ...
+}
+
+c.Close(websocket.StatusNormalClosure, "")
+```
+
+## Comparison
+
+### gorilla/websocket
+
+Advantages of [gorilla/websocket](https://github.com/gorilla/websocket):
+
+- Mature and widely used
+- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage)
+- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers)
+
+Advantages of nhooyr.io/websocket:
+
+- Minimal and idiomatic API
+ - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side.
+- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper
+- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535))
+- Full [context.Context](https://blog.golang.org/context) support
+- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client)
+ - Will enable easy HTTP/2 support in the future
+ - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client.
+- Concurrent writes
+- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448))
+- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API
+ - Gorilla requires registering a pong callback before sending a Ping
+- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432))
+- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages
+- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go
+ - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/).
+- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support
+ - Gorilla only supports no context takeover mode
+ - We use [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203))
+- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492))
+- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370))
+
+#### golang.org/x/net/websocket
+
+[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated.
+See [golang/go/issues/18152](https://github.com/golang/go/issues/18152).
+
+The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper can help in transitioning
+to nhooyr.io/websocket.
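+
+For example, code written against net.Conn can be pointed at a WebSocket connection
+through this wrapper (a minimal sketch; assumes an open *websocket.Conn c):
+
+```go
+nc := websocket.NetConn(ctx, c, websocket.MessageBinary)
+defer nc.Close()
+
+// nc satisfies net.Conn, so it can be handed to existing code.
+fmt.Fprintln(nc, "hello")
+```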
+
+#### gobwas/ws
+
+[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used
+in an event-driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb).
+
+However, when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use.
diff --git a/vendor/nhooyr.io/websocket/accept.go b/vendor/nhooyr.io/websocket/accept.go
new file mode 100644
index 00000000..18536bdb
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/accept.go
@@ -0,0 +1,370 @@
+// +build !js
+
+package websocket
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "path/filepath"
+ "strings"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+ // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client.
+ // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to
+ // reject it, close the connection when c.Subprotocol() == "".
+ Subprotocols []string
+
+ // InsecureSkipVerify is used to disable Accept's origin verification behaviour.
+ //
+ // You probably want to use OriginPatterns instead.
+ InsecureSkipVerify bool
+
+ // OriginPatterns lists the host patterns for authorized origins.
+ // The request host is always authorized.
+ // Use this to enable cross origin WebSockets.
+ //
+	// e.g. JavaScript running on example.com wants to access a WebSocket server at chat.example.com.
+ // In such a case, example.com is the origin and chat.example.com is the request host.
+ // One would set this field to []string{"example.com"} to authorize example.com to connect.
+ //
+ // Each pattern is matched case insensitively against the request origin host
+ // with filepath.Match.
+ // See https://golang.org/pkg/path/filepath/#Match
+ //
+ // Please ensure you understand the ramifications of enabling this.
+ // If used incorrectly your WebSocket server will be open to CSRF attacks.
+ //
+ // Do not use * as a pattern to allow any origin, prefer to use InsecureSkipVerify instead
+ // to bring attention to the danger of such a setting.
+ OriginPatterns []string
+
+ // CompressionMode controls the compression mode.
+ // Defaults to CompressionNoContextTakeover.
+ //
+ // See docs on CompressionMode for details.
+ CompressionMode CompressionMode
+
+ // CompressionThreshold controls the minimum size of a message before compression is applied.
+ //
+ // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+ // for CompressionContextTakeover.
+ CompressionThreshold int
+}
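+
+// For illustration only (not part of the original source): a server whose front end
+// is served from a separate, hypothetical domain might pass options such as
+//
+//	websocket.Accept(w, r, &websocket.AcceptOptions{
+//		OriginPatterns: []string{"example.com", "*.example.com"},
+//	})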
+
+// Accept accepts a WebSocket handshake from a client and upgrades the
+// connection to a WebSocket.
+//
+// Accept will not allow cross origin requests by default.
+// See the InsecureSkipVerify and OriginPatterns options to allow cross origin requests.
+//
+// Accept will write a response to w on all errors.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+ return accept(w, r, opts)
+}
+
+func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) {
+ defer errd.Wrap(&err, "failed to accept WebSocket connection")
+
+ if opts == nil {
+ opts = &AcceptOptions{}
+ }
+ opts = &*opts
+
+ errCode, err := verifyClientRequest(w, r)
+ if err != nil {
+ http.Error(w, err.Error(), errCode)
+ return nil, err
+ }
+
+ if !opts.InsecureSkipVerify {
+ err = authenticateOrigin(r, opts.OriginPatterns)
+ if err != nil {
+ if errors.Is(err, filepath.ErrBadPattern) {
+ log.Printf("websocket: %v", err)
+ err = errors.New(http.StatusText(http.StatusForbidden))
+ }
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return nil, err
+ }
+ }
+
+ hj, ok := w.(http.Hijacker)
+ if !ok {
+ err = errors.New("http.ResponseWriter does not implement http.Hijacker")
+ http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
+ return nil, err
+ }
+
+ w.Header().Set("Upgrade", "websocket")
+ w.Header().Set("Connection", "Upgrade")
+
+ key := r.Header.Get("Sec-WebSocket-Key")
+ w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key))
+
+ subproto := selectSubprotocol(r, opts.Subprotocols)
+ if subproto != "" {
+ w.Header().Set("Sec-WebSocket-Protocol", subproto)
+ }
+
+ copts, err := acceptCompression(r, w, opts.CompressionMode)
+ if err != nil {
+ return nil, err
+ }
+
+ w.WriteHeader(http.StatusSwitchingProtocols)
+ // See https://github.com/nhooyr/websocket/issues/166
+ if ginWriter, ok := w.(interface {
+ WriteHeaderNow()
+ }); ok {
+ ginWriter.WriteHeaderNow()
+ }
+
+ netConn, brw, err := hj.Hijack()
+ if err != nil {
+ err = fmt.Errorf("failed to hijack connection: %w", err)
+ http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+ return nil, err
+ }
+
+ // https://github.com/golang/go/issues/32314
+ b, _ := brw.Reader.Peek(brw.Reader.Buffered())
+ brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn))
+
+ return newConn(connConfig{
+ subprotocol: w.Header().Get("Sec-WebSocket-Protocol"),
+ rwc: netConn,
+ client: false,
+ copts: copts,
+ flateThreshold: opts.CompressionThreshold,
+
+ br: brw.Reader,
+ bw: brw.Writer,
+ }), nil
+}
+
+func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) {
+ if !r.ProtoAtLeast(1, 1) {
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto)
+ }
+
+ if !headerContainsTokenIgnoreCase(r.Header, "Connection", "Upgrade") {
+ w.Header().Set("Connection", "Upgrade")
+ w.Header().Set("Upgrade", "websocket")
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection"))
+ }
+
+ if !headerContainsTokenIgnoreCase(r.Header, "Upgrade", "websocket") {
+ w.Header().Set("Connection", "Upgrade")
+ w.Header().Set("Upgrade", "websocket")
+ return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade"))
+ }
+
+ if r.Method != "GET" {
+ return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method)
+ }
+
+ if r.Header.Get("Sec-WebSocket-Version") != "13" {
+ w.Header().Set("Sec-WebSocket-Version", "13")
+ return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version"))
+ }
+
+ if r.Header.Get("Sec-WebSocket-Key") == "" {
+ return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key")
+ }
+
+ return 0, nil
+}
+
+func authenticateOrigin(r *http.Request, originHosts []string) error {
+ origin := r.Header.Get("Origin")
+ if origin == "" {
+ return nil
+ }
+
+ u, err := url.Parse(origin)
+ if err != nil {
+ return fmt.Errorf("failed to parse Origin header %q: %w", origin, err)
+ }
+
+ if strings.EqualFold(r.Host, u.Host) {
+ return nil
+ }
+
+ for _, hostPattern := range originHosts {
+ matched, err := match(hostPattern, u.Host)
+ if err != nil {
+ return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err)
+ }
+ if matched {
+ return nil
+ }
+ }
+ return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host)
+}
+
+func match(pattern, s string) (bool, error) {
+ return filepath.Match(strings.ToLower(pattern), strings.ToLower(s))
+}
+
+func selectSubprotocol(r *http.Request, subprotocols []string) string {
+ cps := headerTokens(r.Header, "Sec-WebSocket-Protocol")
+ for _, sp := range subprotocols {
+ for _, cp := range cps {
+ if strings.EqualFold(sp, cp) {
+ return cp
+ }
+ }
+ }
+ return ""
+}
+
+func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) {
+ if mode == CompressionDisabled {
+ return nil, nil
+ }
+
+ for _, ext := range websocketExtensions(r.Header) {
+ switch ext.name {
+ case "permessage-deflate":
+ return acceptDeflate(w, ext, mode)
+ // Disabled for now, see https://github.com/nhooyr/websocket/issues/218
+ // case "x-webkit-deflate-frame":
+ // return acceptWebkitDeflate(w, ext, mode)
+ }
+ }
+ return nil, nil
+}
+
+func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
+ copts := mode.opts()
+
+ for _, p := range ext.params {
+ switch p {
+ case "client_no_context_takeover":
+ copts.clientNoContextTakeover = true
+ continue
+ case "server_no_context_takeover":
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ if strings.HasPrefix(p, "client_max_window_bits") {
+ // We cannot adjust the read sliding window so cannot make use of this.
+ continue
+ }
+
+ err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return nil, err
+ }
+
+ copts.setHeader(w.Header())
+
+ return copts, nil
+}
+
+func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) {
+ copts := mode.opts()
+ // The peer must explicitly request it.
+ copts.serverNoContextTakeover = false
+
+ for _, p := range ext.params {
+ if p == "no_context_takeover" {
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead
+ // of ignoring it as the draft spec is unclear. It says the server can ignore it
+ // but the server has no way of signalling to the client it was ignored as the parameters
+ // are set one way.
+		// Thus, ignoring it would make the client think we understood it, which would cause issues.
+ // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1
+ //
+ // Either way, we're only implementing this for webkit which never sends the max_window_bits
+ // parameter so we don't need to worry about it.
+ err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p)
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return nil, err
+ }
+
+ s := "x-webkit-deflate-frame"
+ if copts.clientNoContextTakeover {
+ s += "; no_context_takeover"
+ }
+ w.Header().Set("Sec-WebSocket-Extensions", s)
+
+ return copts, nil
+}
+
+func headerContainsTokenIgnoreCase(h http.Header, key, token string) bool {
+ for _, t := range headerTokens(h, key) {
+ if strings.EqualFold(t, token) {
+ return true
+ }
+ }
+ return false
+}
+
+type websocketExtension struct {
+ name string
+ params []string
+}
+
+func websocketExtensions(h http.Header) []websocketExtension {
+ var exts []websocketExtension
+ extStrs := headerTokens(h, "Sec-WebSocket-Extensions")
+ for _, extStr := range extStrs {
+ if extStr == "" {
+ continue
+ }
+
+ vals := strings.Split(extStr, ";")
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+
+ e := websocketExtension{
+ name: vals[0],
+ params: vals[1:],
+ }
+
+ exts = append(exts, e)
+ }
+ return exts
+}
+
+func headerTokens(h http.Header, key string) []string {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ var tokens []string
+ for _, v := range h[key] {
+ v = strings.TrimSpace(v)
+ for _, t := range strings.Split(v, ",") {
+ t = strings.TrimSpace(t)
+ tokens = append(tokens, t)
+ }
+ }
+ return tokens
+}
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func secWebSocketAccept(secWebSocketKey string) string {
+ h := sha1.New()
+ h.Write([]byte(secWebSocketKey))
+ h.Write(keyGUID)
+
+ return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
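+
+// For reference (not part of the original source): the example handshake in
+// RFC 6455 section 1.3 uses the key "dGhlIHNhbXBsZSBub25jZQ==", for which this
+// computation produces the accept value "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=".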
diff --git a/vendor/nhooyr.io/websocket/accept_js.go b/vendor/nhooyr.io/websocket/accept_js.go
new file mode 100644
index 00000000..daad4b79
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/accept_js.go
@@ -0,0 +1,20 @@
+package websocket
+
+import (
+ "errors"
+ "net/http"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+ Subprotocols []string
+ InsecureSkipVerify bool
+ OriginPatterns []string
+ CompressionMode CompressionMode
+ CompressionThreshold int
+}
+
+// Accept is stubbed out for Wasm.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+ return nil, errors.New("unimplemented")
+}
diff --git a/vendor/nhooyr.io/websocket/close.go b/vendor/nhooyr.io/websocket/close.go
new file mode 100644
index 00000000..7cbc19e9
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/close.go
@@ -0,0 +1,76 @@
+package websocket
+
+import (
+ "errors"
+ "fmt"
+)
+
+// StatusCode represents a WebSocket status code.
+// https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode int
+
+// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+//
+// These are only the status codes defined by the protocol.
+//
+// You can define custom codes in the 3000-4999 range.
+// The 3000-3999 range is reserved for use by libraries, frameworks and applications.
+// The 4000-4999 range is reserved for private use.
+const (
+ StatusNormalClosure StatusCode = 1000
+ StatusGoingAway StatusCode = 1001
+ StatusProtocolError StatusCode = 1002
+ StatusUnsupportedData StatusCode = 1003
+
+ // 1004 is reserved and so unexported.
+ statusReserved StatusCode = 1004
+
+ // StatusNoStatusRcvd cannot be sent in a close message.
+ // It is reserved for when a close message is received without
+ // a status code.
+ StatusNoStatusRcvd StatusCode = 1005
+
+ // StatusAbnormalClosure is exported for use only with Wasm.
+ // In non Wasm Go, the returned error will indicate whether the
+ // connection was closed abnormally.
+ StatusAbnormalClosure StatusCode = 1006
+
+ StatusInvalidFramePayloadData StatusCode = 1007
+ StatusPolicyViolation StatusCode = 1008
+ StatusMessageTooBig StatusCode = 1009
+ StatusMandatoryExtension StatusCode = 1010
+ StatusInternalError StatusCode = 1011
+ StatusServiceRestart StatusCode = 1012
+ StatusTryAgainLater StatusCode = 1013
+ StatusBadGateway StatusCode = 1014
+
+ // StatusTLSHandshake is only exported for use with Wasm.
+ // In non Wasm Go, the returned error will indicate whether there was
+ // a TLS handshake failure.
+ StatusTLSHandshake StatusCode = 1015
+)
+
+// CloseError is returned when the connection is closed with a status and reason.
+//
+// Use Go 1.13's errors.As to check for this error.
+// Also see the CloseStatus helper.
+type CloseError struct {
+ Code StatusCode
+ Reason string
+}
+
+func (ce CloseError) Error() string {
+ return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason)
+}
+
+// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab
+// the status code from a CloseError.
+//
+// -1 will be returned if the passed error is nil or not a CloseError.
+func CloseStatus(err error) StatusCode {
+ var ce CloseError
+ if errors.As(err, &ce) {
+ return ce.Code
+ }
+ return -1
+}
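+
+// For illustration only (not part of the original source), a caller typically
+// distinguishes a clean close from a failure like so:
+//
+//	_, _, err := c.Read(ctx)
+//	if websocket.CloseStatus(err) == websocket.StatusNormalClosure {
+//		// The peer completed the close handshake normally.
+//	}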
diff --git a/vendor/nhooyr.io/websocket/close_notjs.go b/vendor/nhooyr.io/websocket/close_notjs.go
new file mode 100644
index 00000000..4251311d
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/close_notjs.go
@@ -0,0 +1,211 @@
+// +build !js
+
+package websocket
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "log"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// Close performs the WebSocket close handshake with the given status code and reason.
+//
+// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for
+// the peer to send a close frame.
+// All data messages received from the peer during the close handshake will be discarded.
+//
+// The connection can only be closed once. Additional calls to Close
+// are no-ops.
+//
+// The maximum length of reason must be 125 bytes. Avoid
+// sending a dynamic reason.
+//
+// Close will unblock all goroutines interacting with the connection once
+// complete.
+func (c *Conn) Close(code StatusCode, reason string) error {
+ return c.closeHandshake(code, reason)
+}
+
+func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) {
+ defer errd.Wrap(&err, "failed to close WebSocket")
+
+ writeErr := c.writeClose(code, reason)
+ closeHandshakeErr := c.waitCloseHandshake()
+
+ if writeErr != nil {
+ return writeErr
+ }
+
+ if CloseStatus(closeHandshakeErr) == -1 {
+ return closeHandshakeErr
+ }
+
+ return nil
+}
+
+var errAlreadyWroteClose = errors.New("already wrote close")
+
+func (c *Conn) writeClose(code StatusCode, reason string) error {
+ c.closeMu.Lock()
+ wroteClose := c.wroteClose
+ c.wroteClose = true
+ c.closeMu.Unlock()
+ if wroteClose {
+ return errAlreadyWroteClose
+ }
+
+ ce := CloseError{
+ Code: code,
+ Reason: reason,
+ }
+
+ var p []byte
+ var marshalErr error
+ if ce.Code != StatusNoStatusRcvd {
+ p, marshalErr = ce.bytes()
+ if marshalErr != nil {
+ log.Printf("websocket: %v", marshalErr)
+ }
+ }
+
+ writeErr := c.writeControl(context.Background(), opClose, p)
+ if CloseStatus(writeErr) != -1 {
+ // Not a real error if it's due to a close frame being received.
+ writeErr = nil
+ }
+
+ // We do this after in case there was an error writing the close frame.
+ c.setCloseErr(fmt.Errorf("sent close frame: %w", ce))
+
+ if marshalErr != nil {
+ return marshalErr
+ }
+ return writeErr
+}
+
+func (c *Conn) waitCloseHandshake() error {
+ defer c.close(nil)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
+
+ err := c.readMu.lock(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.readMu.unlock()
+
+ if c.readCloseFrameErr != nil {
+ return c.readCloseFrameErr
+ }
+
+ for {
+ h, err := c.readLoop(ctx)
+ if err != nil {
+ return err
+ }
+
+ for i := int64(0); i < h.payloadLength; i++ {
+ _, err := c.br.ReadByte()
+ if err != nil {
+ return err
+ }
+ }
+ }
+}
+
+func parseClosePayload(p []byte) (CloseError, error) {
+ if len(p) == 0 {
+ return CloseError{
+ Code: StatusNoStatusRcvd,
+ }, nil
+ }
+
+ if len(p) < 2 {
+ return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p)
+ }
+
+ ce := CloseError{
+ Code: StatusCode(binary.BigEndian.Uint16(p)),
+ Reason: string(p[2:]),
+ }
+
+ if !validWireCloseCode(ce.Code) {
+ return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code)
+ }
+
+ return ce, nil
+}
+
+// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+// and https://tools.ietf.org/html/rfc6455#section-7.4.1
+func validWireCloseCode(code StatusCode) bool {
+ switch code {
+ case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
+ return false
+ }
+
+ if code >= StatusNormalClosure && code <= StatusBadGateway {
+ return true
+ }
+ if code >= 3000 && code <= 4999 {
+ return true
+ }
+
+ return false
+}
+
+func (ce CloseError) bytes() ([]byte, error) {
+ p, err := ce.bytesErr()
+ if err != nil {
+ err = fmt.Errorf("failed to marshal close frame: %w", err)
+ ce = CloseError{
+ Code: StatusInternalError,
+ }
+ p, _ = ce.bytesErr()
+ }
+ return p, err
+}
+
+const maxCloseReason = maxControlPayload - 2
+
+func (ce CloseError) bytesErr() ([]byte, error) {
+ if len(ce.Reason) > maxCloseReason {
+ return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason))
+ }
+
+ if !validWireCloseCode(ce.Code) {
+ return nil, fmt.Errorf("status code %v cannot be set", ce.Code)
+ }
+
+ buf := make([]byte, 2+len(ce.Reason))
+ binary.BigEndian.PutUint16(buf, uint16(ce.Code))
+ copy(buf[2:], ce.Reason)
+ return buf, nil
+}
+
+func (c *Conn) setCloseErr(err error) {
+ c.closeMu.Lock()
+ c.setCloseErrLocked(err)
+ c.closeMu.Unlock()
+}
+
+func (c *Conn) setCloseErrLocked(err error) {
+ if c.closeErr == nil {
+ c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+ }
+}
+
+func (c *Conn) isClosed() bool {
+ select {
+ case <-c.closed:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/nhooyr.io/websocket/compress.go b/vendor/nhooyr.io/websocket/compress.go
new file mode 100644
index 00000000..80b46d1c
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/compress.go
@@ -0,0 +1,39 @@
+package websocket
+
+// CompressionMode represents the modes available to the deflate extension.
+// See https://tools.ietf.org/html/rfc7692
+//
+// A compatibility layer is implemented for the older deflate-frame extension used
+// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06
+// It will work the same in every way except that we cannot signal to the peer we
+// want to use no context takeover on our side, we can only signal that they should.
+// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218
+type CompressionMode int
+
+const (
+ // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed
+ // for every message. This applies to both server and client side.
+ //
+ // This means less efficient compression as the sliding window from previous messages
+ // will not be used but the memory overhead will be lower if the connections
+ // are long lived and seldom used.
+ //
+ // The message will only be compressed if greater than 512 bytes.
+ CompressionNoContextTakeover CompressionMode = iota
+
+ // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection.
+ // This enables reusing the sliding window from previous messages.
+ // As most WebSocket protocols are repetitive, this can be very efficient.
+ // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover.
+ //
+ // If the peer negotiates NoContextTakeover on the client or server side, it will be
+ // used instead as this is required by the RFC.
+ CompressionContextTakeover
+
+ // CompressionDisabled disables the deflate extension.
+ //
+ // Use this if you are using a predominantly binary protocol with very
+ // little duplication in between messages or CPU and memory are more
+ // important than bandwidth.
+ CompressionDisabled
+)
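+
+// For illustration only (not part of the original source), a server expecting many
+// small, repetitive JSON messages might trade memory for bandwidth with
+//
+//	websocket.Accept(w, r, &websocket.AcceptOptions{
+//		CompressionMode: websocket.CompressionContextTakeover,
+//	})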
diff --git a/vendor/nhooyr.io/websocket/compress_notjs.go b/vendor/nhooyr.io/websocket/compress_notjs.go
new file mode 100644
index 00000000..809a272c
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/compress_notjs.go
@@ -0,0 +1,181 @@
+// +build !js
+
+package websocket
+
+import (
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/klauspost/compress/flate"
+)
+
+func (m CompressionMode) opts() *compressionOptions {
+ return &compressionOptions{
+ clientNoContextTakeover: m == CompressionNoContextTakeover,
+ serverNoContextTakeover: m == CompressionNoContextTakeover,
+ }
+}
+
+type compressionOptions struct {
+ clientNoContextTakeover bool
+ serverNoContextTakeover bool
+}
+
+func (copts *compressionOptions) setHeader(h http.Header) {
+ s := "permessage-deflate"
+ if copts.clientNoContextTakeover {
+ s += "; client_no_context_takeover"
+ }
+ if copts.serverNoContextTakeover {
+ s += "; server_no_context_takeover"
+ }
+ h.Set("Sec-WebSocket-Extensions", s)
+}
+
+// These bytes are required to get flate.Reader to return.
+// They are removed when sending to avoid the overhead, as
+// WebSocket framing tells us when the message has ended, but
+// they need to be added back when reading, otherwise flate.Reader
+// keeps trying to return more bytes.
+const deflateMessageTail = "\x00\x00\xff\xff"
+
+type trimLastFourBytesWriter struct {
+ w io.Writer
+ tail []byte
+}
+
+func (tw *trimLastFourBytesWriter) reset() {
+ if tw != nil && tw.tail != nil {
+ tw.tail = tw.tail[:0]
+ }
+}
+
+func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) {
+ if tw.tail == nil {
+ tw.tail = make([]byte, 0, 4)
+ }
+
+ extra := len(tw.tail) + len(p) - 4
+
+ if extra <= 0 {
+ tw.tail = append(tw.tail, p...)
+ return len(p), nil
+ }
+
+ // Now we need to write as many extra bytes as we can from the previous tail.
+ if extra > len(tw.tail) {
+ extra = len(tw.tail)
+ }
+ if extra > 0 {
+ _, err := tw.w.Write(tw.tail[:extra])
+ if err != nil {
+ return 0, err
+ }
+
+ // Shift remaining bytes in tail over.
+ n := copy(tw.tail, tw.tail[extra:])
+ tw.tail = tw.tail[:n]
+ }
+
+ // If p is less than or equal to 4 bytes,
+	// all of it is part of the tail.
+ if len(p) <= 4 {
+ tw.tail = append(tw.tail, p...)
+ return len(p), nil
+ }
+
+ // Otherwise, only the last 4 bytes are.
+ tw.tail = append(tw.tail, p[len(p)-4:]...)
+
+ p = p[:len(p)-4]
+ n, err := tw.w.Write(p)
+ return n + 4, err
+}
+
+var flateReaderPool sync.Pool
+
+func getFlateReader(r io.Reader, dict []byte) io.Reader {
+ fr, ok := flateReaderPool.Get().(io.Reader)
+ if !ok {
+ return flate.NewReaderDict(r, dict)
+ }
+ fr.(flate.Resetter).Reset(r, dict)
+ return fr
+}
+
+func putFlateReader(fr io.Reader) {
+ flateReaderPool.Put(fr)
+}
+
+type slidingWindow struct {
+ buf []byte
+}
+
+var swPoolMu sync.RWMutex
+var swPool = map[int]*sync.Pool{}
+
+func slidingWindowPool(n int) *sync.Pool {
+ swPoolMu.RLock()
+ p, ok := swPool[n]
+ swPoolMu.RUnlock()
+ if ok {
+ return p
+ }
+
+ p = &sync.Pool{}
+
+ swPoolMu.Lock()
+ swPool[n] = p
+ swPoolMu.Unlock()
+
+ return p
+}
+
+func (sw *slidingWindow) init(n int) {
+ if sw.buf != nil {
+ return
+ }
+
+ if n == 0 {
+ n = 32768
+ }
+
+ p := slidingWindowPool(n)
+ buf, ok := p.Get().([]byte)
+ if ok {
+ sw.buf = buf[:0]
+ } else {
+ sw.buf = make([]byte, 0, n)
+ }
+}
+
+func (sw *slidingWindow) close() {
+ if sw.buf == nil {
+ return
+ }
+
+ swPoolMu.Lock()
+ swPool[cap(sw.buf)].Put(sw.buf)
+ swPoolMu.Unlock()
+ sw.buf = nil
+}
+
+func (sw *slidingWindow) write(p []byte) {
+ if len(p) >= cap(sw.buf) {
+ sw.buf = sw.buf[:cap(sw.buf)]
+ p = p[len(p)-cap(sw.buf):]
+ copy(sw.buf, p)
+ return
+ }
+
+ left := cap(sw.buf) - len(sw.buf)
+ if left < len(p) {
+		// Discard the oldest spaceNeeded bytes from the front to make room for p at the end.
+ spaceNeeded := len(p) - left
+ copy(sw.buf, sw.buf[spaceNeeded:])
+ sw.buf = sw.buf[:len(sw.buf)-spaceNeeded]
+ }
+
+ sw.buf = append(sw.buf, p...)
+}
diff --git a/vendor/nhooyr.io/websocket/conn.go b/vendor/nhooyr.io/websocket/conn.go
new file mode 100644
index 00000000..a41808be
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/conn.go
@@ -0,0 +1,13 @@
+package websocket
+
+// MessageType represents the type of a WebSocket message.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+type MessageType int
+
+// MessageType constants.
+const (
+ // MessageText is for UTF-8 encoded text messages like JSON.
+ MessageText MessageType = iota + 1
+ // MessageBinary is for binary messages like protobufs.
+ MessageBinary
+)
diff --git a/vendor/nhooyr.io/websocket/conn_notjs.go b/vendor/nhooyr.io/websocket/conn_notjs.go
new file mode 100644
index 00000000..0c85ab77
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/conn_notjs.go
@@ -0,0 +1,265 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strconv"
+ "sync"
+ "sync/atomic"
+)
+
+// Conn represents a WebSocket connection.
+// All methods may be called concurrently except for Reader and Read.
+//
+// You must always read from the connection. Otherwise control
+// frames will not be handled. See Reader and CloseRead.
+//
+// Be sure to call Close on the connection when you
+// are finished with it to release associated resources.
+//
+// On any error from any method, the connection is closed
+// with an appropriate reason.
+type Conn struct {
+ subprotocol string
+ rwc io.ReadWriteCloser
+ client bool
+ copts *compressionOptions
+ flateThreshold int
+ br *bufio.Reader
+ bw *bufio.Writer
+
+ readTimeout chan context.Context
+ writeTimeout chan context.Context
+
+ // Read state.
+ readMu *mu
+ readHeaderBuf [8]byte
+ readControlBuf [maxControlPayload]byte
+ msgReader *msgReader
+ readCloseFrameErr error
+
+ // Write state.
+ msgWriterState *msgWriterState
+ writeFrameMu *mu
+ writeBuf []byte
+ writeHeaderBuf [8]byte
+ writeHeader header
+
+ closed chan struct{}
+ closeMu sync.Mutex
+ closeErr error
+ wroteClose bool
+
+ pingCounter int32
+ activePingsMu sync.Mutex
+ activePings map[string]chan<- struct{}
+}
+
+type connConfig struct {
+ subprotocol string
+ rwc io.ReadWriteCloser
+ client bool
+ copts *compressionOptions
+ flateThreshold int
+
+ br *bufio.Reader
+ bw *bufio.Writer
+}
+
+func newConn(cfg connConfig) *Conn {
+ c := &Conn{
+ subprotocol: cfg.subprotocol,
+ rwc: cfg.rwc,
+ client: cfg.client,
+ copts: cfg.copts,
+ flateThreshold: cfg.flateThreshold,
+
+ br: cfg.br,
+ bw: cfg.bw,
+
+ readTimeout: make(chan context.Context),
+ writeTimeout: make(chan context.Context),
+
+ closed: make(chan struct{}),
+ activePings: make(map[string]chan<- struct{}),
+ }
+
+ c.readMu = newMu(c)
+ c.writeFrameMu = newMu(c)
+
+ c.msgReader = newMsgReader(c)
+
+ c.msgWriterState = newMsgWriterState(c)
+ if c.client {
+ c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc)
+ }
+
+ if c.flate() && c.flateThreshold == 0 {
+ c.flateThreshold = 128
+ if !c.msgWriterState.flateContextTakeover() {
+ c.flateThreshold = 512
+ }
+ }
+
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.close(errors.New("connection garbage collected"))
+ })
+
+ go c.timeoutLoop()
+
+ return c
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+ return c.subprotocol
+}
+
+func (c *Conn) close(err error) {
+ c.closeMu.Lock()
+ defer c.closeMu.Unlock()
+
+ if c.isClosed() {
+ return
+ }
+ c.setCloseErrLocked(err)
+ close(c.closed)
+ runtime.SetFinalizer(c, nil)
+
+ // Have to close after c.closed is closed to ensure any goroutine that wakes up
+ // from the connection being closed also sees that c.closed is closed and returns
+ // closeErr.
+ c.rwc.Close()
+
+ go func() {
+ c.msgWriterState.close()
+
+ c.msgReader.close()
+ }()
+}
+
+func (c *Conn) timeoutLoop() {
+ readCtx := context.Background()
+ writeCtx := context.Background()
+
+ for {
+ select {
+ case <-c.closed:
+ return
+
+ case writeCtx = <-c.writeTimeout:
+ case readCtx = <-c.readTimeout:
+
+ case <-readCtx.Done():
+ c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err()))
+ go c.writeError(StatusPolicyViolation, errors.New("timed out"))
+ case <-writeCtx.Done():
+ c.close(fmt.Errorf("write timed out: %w", writeCtx.Err()))
+ return
+ }
+ }
+}
+
+func (c *Conn) flate() bool {
+ return c.copts != nil
+}
+
+// Ping sends a ping to the peer and waits for a pong.
+// Use this to measure latency or ensure the peer is responsive.
+// Ping must be called concurrently with Reader as it does
+// not read from the connection but instead waits for a Reader call
+// to read the pong.
+//
+// TCP Keepalives should suffice for most use cases.
+func (c *Conn) Ping(ctx context.Context) error {
+ p := atomic.AddInt32(&c.pingCounter, 1)
+
+ err := c.ping(ctx, strconv.Itoa(int(p)))
+ if err != nil {
+ return fmt.Errorf("failed to ping: %w", err)
+ }
+ return nil
+}
+
+func (c *Conn) ping(ctx context.Context, p string) error {
+ pong := make(chan struct{}, 1)
+
+ c.activePingsMu.Lock()
+ c.activePings[p] = pong
+ c.activePingsMu.Unlock()
+
+ defer func() {
+ c.activePingsMu.Lock()
+ delete(c.activePings, p)
+ c.activePingsMu.Unlock()
+ }()
+
+ err := c.writeControl(ctx, opPing, []byte(p))
+ if err != nil {
+ return err
+ }
+
+ select {
+ case <-c.closed:
+ return c.closeErr
+ case <-ctx.Done():
+ err := fmt.Errorf("failed to wait for pong: %w", ctx.Err())
+ c.close(err)
+ return err
+ case <-pong:
+ return nil
+ }
+}
+
+type mu struct {
+ c *Conn
+ ch chan struct{}
+}
+
+func newMu(c *Conn) *mu {
+ return &mu{
+ c: c,
+ ch: make(chan struct{}, 1),
+ }
+}
+
+func (m *mu) forceLock() {
+ m.ch <- struct{}{}
+}
+
+func (m *mu) lock(ctx context.Context) error {
+ select {
+ case <-m.c.closed:
+ return m.c.closeErr
+ case <-ctx.Done():
+ err := fmt.Errorf("failed to acquire lock: %w", ctx.Err())
+ m.c.close(err)
+ return err
+ case m.ch <- struct{}{}:
+ // To make sure the connection is certainly alive.
+ // As it's possible the send on m.ch was selected
+ // over the receive on closed.
+ select {
+ case <-m.c.closed:
+ // Make sure to release.
+ m.unlock()
+ return m.c.closeErr
+ default:
+ }
+ return nil
+ }
+}
+
+func (m *mu) unlock() {
+ select {
+ case <-m.ch:
+ default:
+ }
+}
diff --git a/vendor/nhooyr.io/websocket/dial.go b/vendor/nhooyr.io/websocket/dial.go
new file mode 100644
index 00000000..7a7787ff
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/dial.go
@@ -0,0 +1,292 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// DialOptions represents Dial's options.
+type DialOptions struct {
+ // HTTPClient is used for the connection.
+ // Its Transport must return writable bodies for WebSocket handshakes.
+	// http.Transport does so beginning with Go 1.12.
+ HTTPClient *http.Client
+
+ // HTTPHeader specifies the HTTP headers included in the handshake request.
+ HTTPHeader http.Header
+
+ // Subprotocols lists the WebSocket subprotocols to negotiate with the server.
+ Subprotocols []string
+
+ // CompressionMode controls the compression mode.
+ // Defaults to CompressionNoContextTakeover.
+ //
+ // See docs on CompressionMode for details.
+ CompressionMode CompressionMode
+
+ // CompressionThreshold controls the minimum size of a message before compression is applied.
+ //
+ // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+ // for CompressionContextTakeover.
+ CompressionThreshold int
+}
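+
+// For illustration only (not part of the original source), a dial carrying a
+// hypothetical bearer token and requesting a subprotocol might look like
+//
+//	c, _, err := websocket.Dial(ctx, "wss://example.com/ws", &websocket.DialOptions{
+//		HTTPHeader:   http.Header{"Authorization": {"Bearer <token>"}},
+//		Subprotocols: []string{"chat"},
+//	})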
+
+// Dial performs a WebSocket handshake on url.
+//
+// The response is the WebSocket handshake response from the server.
+// You never need to close resp.Body yourself.
+//
+// If an error occurs, the returned response may be non-nil.
+// However, you can only read the first 1024 bytes of the body.
+//
+// This function requires at least Go 1.12 as it uses a new feature
+// in net/http to perform WebSocket handshakes.
+// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861
+//
+// URLs with http/https schemes will work and are interpreted as ws/wss.
+func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) {
+ return dial(ctx, u, opts, nil)
+}
+
+func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) {
+ defer errd.Wrap(&err, "failed to WebSocket dial")
+
+ if opts == nil {
+ opts = &DialOptions{}
+ }
+
+ opts = &*opts
+ if opts.HTTPClient == nil {
+ opts.HTTPClient = http.DefaultClient
+ } else if opts.HTTPClient.Timeout > 0 {
+ var cancel context.CancelFunc
+
+ ctx, cancel = context.WithTimeout(ctx, opts.HTTPClient.Timeout)
+ defer cancel()
+
+ newClient := *opts.HTTPClient
+ newClient.Timeout = 0
+ opts.HTTPClient = &newClient
+ }
+
+ if opts.HTTPHeader == nil {
+ opts.HTTPHeader = http.Header{}
+ }
+
+ secWebSocketKey, err := secWebSocketKey(rand)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err)
+ }
+
+ var copts *compressionOptions
+ if opts.CompressionMode != CompressionDisabled {
+ copts = opts.CompressionMode.opts()
+ }
+
+ resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey)
+ if err != nil {
+ return nil, resp, err
+ }
+ respBody := resp.Body
+ resp.Body = nil
+ defer func() {
+ if err != nil {
+ // We read a bit of the body for easier debugging.
+ r := io.LimitReader(respBody, 1024)
+
+ timer := time.AfterFunc(time.Second*3, func() {
+ respBody.Close()
+ })
+ defer timer.Stop()
+
+ b, _ := ioutil.ReadAll(r)
+ respBody.Close()
+ resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+ }
+ }()
+
+ copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ rwc, ok := respBody.(io.ReadWriteCloser)
+ if !ok {
+ return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody)
+ }
+
+ return newConn(connConfig{
+ subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"),
+ rwc: rwc,
+ client: true,
+ copts: copts,
+ flateThreshold: opts.CompressionThreshold,
+ br: getBufioReader(rwc),
+ bw: getBufioWriter(rwc),
+ }), resp, nil
+}
+
+func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) {
+ u, err := url.Parse(urls)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse url: %w", err)
+ }
+
+ switch u.Scheme {
+ case "ws":
+ u.Scheme = "http"
+ case "wss":
+ u.Scheme = "https"
+ case "http", "https":
+ default:
+ return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme)
+ }
+
+ req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
+ req.Header = opts.HTTPHeader.Clone()
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", "websocket")
+ req.Header.Set("Sec-WebSocket-Version", "13")
+ req.Header.Set("Sec-WebSocket-Key", secWebSocketKey)
+ if len(opts.Subprotocols) > 0 {
+ req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ","))
+ }
+ if copts != nil {
+ copts.setHeader(req.Header)
+ }
+
+ resp, err := opts.HTTPClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to send handshake request: %w", err)
+ }
+ return resp, nil
+}
+
+func secWebSocketKey(rr io.Reader) (string, error) {
+ if rr == nil {
+ rr = rand.Reader
+ }
+ b := make([]byte, 16)
+ _, err := io.ReadFull(rr, b)
+ if err != nil {
+ return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err)
+ }
+ return base64.StdEncoding.EncodeToString(b), nil
+}
+
+func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) {
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode)
+ }
+
+ if !headerContainsTokenIgnoreCase(resp.Header, "Connection", "Upgrade") {
+ return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection"))
+ }
+
+ if !headerContainsTokenIgnoreCase(resp.Header, "Upgrade", "WebSocket") {
+ return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade"))
+ }
+
+ if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) {
+ return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q",
+ resp.Header.Get("Sec-WebSocket-Accept"),
+ secWebSocketKey,
+ )
+ }
+
+ err := verifySubprotocol(opts.Subprotocols, resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return verifyServerExtensions(copts, resp.Header)
+}
+
+func verifySubprotocol(subprotos []string, resp *http.Response) error {
+ proto := resp.Header.Get("Sec-WebSocket-Protocol")
+ if proto == "" {
+ return nil
+ }
+
+ for _, sp2 := range subprotos {
+ if strings.EqualFold(sp2, proto) {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto)
+}
+
+func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) {
+ exts := websocketExtensions(h)
+ if len(exts) == 0 {
+ return nil, nil
+ }
+
+ ext := exts[0]
+ if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil {
+ return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:])
+ }
+
+ copts = &*copts
+
+ for _, p := range ext.params {
+ switch p {
+ case "client_no_context_takeover":
+ copts.clientNoContextTakeover = true
+ continue
+ case "server_no_context_takeover":
+ copts.serverNoContextTakeover = true
+ continue
+ }
+
+ return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+ }
+
+ return copts, nil
+}
+
+var bufioReaderPool sync.Pool
+
+func getBufioReader(r io.Reader) *bufio.Reader {
+ br, ok := bufioReaderPool.Get().(*bufio.Reader)
+ if !ok {
+ return bufio.NewReader(r)
+ }
+ br.Reset(r)
+ return br
+}
+
+func putBufioReader(br *bufio.Reader) {
+ bufioReaderPool.Put(br)
+}
+
+var bufioWriterPool sync.Pool
+
+func getBufioWriter(w io.Writer) *bufio.Writer {
+ bw, ok := bufioWriterPool.Get().(*bufio.Writer)
+ if !ok {
+ return bufio.NewWriter(w)
+ }
+ bw.Reset(w)
+ return bw
+}
+
+func putBufioWriter(bw *bufio.Writer) {
+ bufioWriterPool.Put(bw)
+}
diff --git a/vendor/nhooyr.io/websocket/doc.go b/vendor/nhooyr.io/websocket/doc.go
new file mode 100644
index 00000000..efa920e3
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/doc.go
@@ -0,0 +1,32 @@
+// +build !js
+
+// Package websocket implements the RFC 6455 WebSocket protocol.
+//
+// https://tools.ietf.org/html/rfc6455
+//
+// Use Dial to dial a WebSocket server.
+//
+// Use Accept to accept a WebSocket client.
+//
+// Conn represents the resulting WebSocket connection.
+//
+// The examples are the best way to understand how to correctly use the library.
+//
+// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages.
+//
+// More documentation at https://nhooyr.io/websocket.
+//
+// Wasm
+//
+// The client side supports compiling to Wasm.
+// It wraps the WebSocket browser API.
+//
+// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+//
+// Some important caveats to be aware of:
+//
+// - Accept always errors out
+// - Conn.Ping is a no-op
+// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-ops
+// - *http.Response from Dial is &http.Response{} with a 101 status code on success
+package websocket // import "nhooyr.io/websocket"
diff --git a/vendor/nhooyr.io/websocket/frame.go b/vendor/nhooyr.io/websocket/frame.go
new file mode 100644
index 00000000..2a036f94
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/frame.go
@@ -0,0 +1,294 @@
+package websocket
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// opcode represents a WebSocket opcode.
+type opcode int
+
+// https://tools.ietf.org/html/rfc6455#section-11.8.
+const (
+ opContinuation opcode = iota
+ opText
+ opBinary
+ // 3 - 7 are reserved for further non-control frames.
+ _
+ _
+ _
+ _
+ _
+ opClose
+ opPing
+ opPong
+	// 11 - 15 are reserved for further control frames.
+)
+
+// header represents a WebSocket frame header.
+// See https://tools.ietf.org/html/rfc6455#section-5.2.
+type header struct {
+ fin bool
+ rsv1 bool
+ rsv2 bool
+ rsv3 bool
+ opcode opcode
+
+ payloadLength int64
+
+ masked bool
+ maskKey uint32
+}
+
+// readFrameHeader reads a header from the reader.
+// See https://tools.ietf.org/html/rfc6455#section-5.2.
+func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) {
+ defer errd.Wrap(&err, "failed to read frame header")
+
+ b, err := r.ReadByte()
+ if err != nil {
+ return header{}, err
+ }
+
+ h.fin = b&(1<<7) != 0
+ h.rsv1 = b&(1<<6) != 0
+ h.rsv2 = b&(1<<5) != 0
+ h.rsv3 = b&(1<<4) != 0
+
+ h.opcode = opcode(b & 0xf)
+
+ b, err = r.ReadByte()
+ if err != nil {
+ return header{}, err
+ }
+
+ h.masked = b&(1<<7) != 0
+
+ payloadLength := b &^ (1 << 7)
+ switch {
+ case payloadLength < 126:
+ h.payloadLength = int64(payloadLength)
+ case payloadLength == 126:
+ _, err = io.ReadFull(r, readBuf[:2])
+ h.payloadLength = int64(binary.BigEndian.Uint16(readBuf))
+ case payloadLength == 127:
+ _, err = io.ReadFull(r, readBuf)
+ h.payloadLength = int64(binary.BigEndian.Uint64(readBuf))
+ }
+ if err != nil {
+ return header{}, err
+ }
+
+ if h.payloadLength < 0 {
+ return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength)
+ }
+
+ if h.masked {
+ _, err = io.ReadFull(r, readBuf[:4])
+ if err != nil {
+ return header{}, err
+ }
+ h.maskKey = binary.LittleEndian.Uint32(readBuf)
+ }
+
+ return h, nil
+}
+
+// maxControlPayload is the maximum length of a control frame payload.
+// See https://tools.ietf.org/html/rfc6455#section-5.5.
+const maxControlPayload = 125
+
+// writeFrameHeader writes the bytes of the header to w.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) {
+ defer errd.Wrap(&err, "failed to write frame header")
+
+ var b byte
+ if h.fin {
+ b |= 1 << 7
+ }
+ if h.rsv1 {
+ b |= 1 << 6
+ }
+ if h.rsv2 {
+ b |= 1 << 5
+ }
+ if h.rsv3 {
+ b |= 1 << 4
+ }
+
+ b |= byte(h.opcode)
+
+ err = w.WriteByte(b)
+ if err != nil {
+ return err
+ }
+
+ lengthByte := byte(0)
+ if h.masked {
+ lengthByte |= 1 << 7
+ }
+
+ switch {
+ case h.payloadLength > math.MaxUint16:
+ lengthByte |= 127
+ case h.payloadLength > 125:
+ lengthByte |= 126
+ case h.payloadLength >= 0:
+ lengthByte |= byte(h.payloadLength)
+ }
+ err = w.WriteByte(lengthByte)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case h.payloadLength > math.MaxUint16:
+ binary.BigEndian.PutUint64(buf, uint64(h.payloadLength))
+ _, err = w.Write(buf)
+ case h.payloadLength > 125:
+ binary.BigEndian.PutUint16(buf, uint16(h.payloadLength))
+ _, err = w.Write(buf[:2])
+ }
+ if err != nil {
+ return err
+ }
+
+ if h.masked {
+ binary.LittleEndian.PutUint32(buf, h.maskKey)
+ _, err = w.Write(buf[:4])
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// mask applies the WebSocket masking algorithm to p
+// with the given key.
+// See https://tools.ietf.org/html/rfc6455#section-5.3
+//
+// The returned value is the correctly rotated key to
+// continue to mask/unmask the message.
+//
+// It is optimized for LittleEndian and expects the key
+// to be in little endian.
+//
+// See https://github.com/golang/go/issues/31586
+func mask(key uint32, b []byte) uint32 {
+ if len(b) >= 8 {
+ key64 := uint64(key)<<32 | uint64(key)
+
+ // At some point in the future we can clean these unrolled loops up.
+ // See https://github.com/golang/go/issues/31586#issuecomment-487436401
+
+ // Then we xor until b is less than 128 bytes.
+ for len(b) >= 128 {
+ v := binary.LittleEndian.Uint64(b)
+ binary.LittleEndian.PutUint64(b, v^key64)
+ v = binary.LittleEndian.Uint64(b[8:16])
+ binary.LittleEndian.PutUint64(b[8:16], v^key64)
+ v = binary.LittleEndian.Uint64(b[16:24])
+ binary.LittleEndian.PutUint64(b[16:24], v^key64)
+ v = binary.LittleEndian.Uint64(b[24:32])
+ binary.LittleEndian.PutUint64(b[24:32], v^key64)
+ v = binary.LittleEndian.Uint64(b[32:40])
+ binary.LittleEndian.PutUint64(b[32:40], v^key64)
+ v = binary.LittleEndian.Uint64(b[40:48])
+ binary.LittleEndian.PutUint64(b[40:48], v^key64)
+ v = binary.LittleEndian.Uint64(b[48:56])
+ binary.LittleEndian.PutUint64(b[48:56], v^key64)
+ v = binary.LittleEndian.Uint64(b[56:64])
+ binary.LittleEndian.PutUint64(b[56:64], v^key64)
+ v = binary.LittleEndian.Uint64(b[64:72])
+ binary.LittleEndian.PutUint64(b[64:72], v^key64)
+ v = binary.LittleEndian.Uint64(b[72:80])
+ binary.LittleEndian.PutUint64(b[72:80], v^key64)
+ v = binary.LittleEndian.Uint64(b[80:88])
+ binary.LittleEndian.PutUint64(b[80:88], v^key64)
+ v = binary.LittleEndian.Uint64(b[88:96])
+ binary.LittleEndian.PutUint64(b[88:96], v^key64)
+ v = binary.LittleEndian.Uint64(b[96:104])
+ binary.LittleEndian.PutUint64(b[96:104], v^key64)
+ v = binary.LittleEndian.Uint64(b[104:112])
+ binary.LittleEndian.PutUint64(b[104:112], v^key64)
+ v = binary.LittleEndian.Uint64(b[112:120])
+ binary.LittleEndian.PutUint64(b[112:120], v^key64)
+ v = binary.LittleEndian.Uint64(b[120:128])
+ binary.LittleEndian.PutUint64(b[120:128], v^key64)
+ b = b[128:]
+ }
+
+ // Then we xor until b is less than 64 bytes.
+ for len(b) >= 64 {
+ v := binary.LittleEndian.Uint64(b)
+ binary.LittleEndian.PutUint64(b, v^key64)
+ v = binary.LittleEndian.Uint64(b[8:16])
+ binary.LittleEndian.PutUint64(b[8:16], v^key64)
+ v = binary.LittleEndian.Uint64(b[16:24])
+ binary.LittleEndian.PutUint64(b[16:24], v^key64)
+ v = binary.LittleEndian.Uint64(b[24:32])
+ binary.LittleEndian.PutUint64(b[24:32], v^key64)
+ v = binary.LittleEndian.Uint64(b[32:40])
+ binary.LittleEndian.PutUint64(b[32:40], v^key64)
+ v = binary.LittleEndian.Uint64(b[40:48])
+ binary.LittleEndian.PutUint64(b[40:48], v^key64)
+ v = binary.LittleEndian.Uint64(b[48:56])
+ binary.LittleEndian.PutUint64(b[48:56], v^key64)
+ v = binary.LittleEndian.Uint64(b[56:64])
+ binary.LittleEndian.PutUint64(b[56:64], v^key64)
+ b = b[64:]
+ }
+
+ // Then we xor until b is less than 32 bytes.
+ for len(b) >= 32 {
+ v := binary.LittleEndian.Uint64(b)
+ binary.LittleEndian.PutUint64(b, v^key64)
+ v = binary.LittleEndian.Uint64(b[8:16])
+ binary.LittleEndian.PutUint64(b[8:16], v^key64)
+ v = binary.LittleEndian.Uint64(b[16:24])
+ binary.LittleEndian.PutUint64(b[16:24], v^key64)
+ v = binary.LittleEndian.Uint64(b[24:32])
+ binary.LittleEndian.PutUint64(b[24:32], v^key64)
+ b = b[32:]
+ }
+
+ // Then we xor until b is less than 16 bytes.
+ for len(b) >= 16 {
+ v := binary.LittleEndian.Uint64(b)
+ binary.LittleEndian.PutUint64(b, v^key64)
+ v = binary.LittleEndian.Uint64(b[8:16])
+ binary.LittleEndian.PutUint64(b[8:16], v^key64)
+ b = b[16:]
+ }
+
+ // Then we xor until b is less than 8 bytes.
+ for len(b) >= 8 {
+ v := binary.LittleEndian.Uint64(b)
+ binary.LittleEndian.PutUint64(b, v^key64)
+ b = b[8:]
+ }
+ }
+
+ // Then we xor until b is less than 4 bytes.
+ for len(b) >= 4 {
+ v := binary.LittleEndian.Uint32(b)
+ binary.LittleEndian.PutUint32(b, v^key)
+ b = b[4:]
+ }
+
+ // xor remaining bytes.
+ for i := range b {
+ b[i] ^= byte(key)
+ key = bits.RotateLeft32(key, -8)
+ }
+
+ return key
+}
diff --git a/vendor/nhooyr.io/websocket/internal/bpool/bpool.go b/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
new file mode 100644
index 00000000..aa826fba
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/internal/bpool/bpool.go
@@ -0,0 +1,24 @@
+package bpool
+
+import (
+ "bytes"
+ "sync"
+)
+
+var bpool sync.Pool
+
+// Get returns a buffer from the pool or creates a new one if
+// the pool is empty.
+func Get() *bytes.Buffer {
+ b := bpool.Get()
+ if b == nil {
+ return &bytes.Buffer{}
+ }
+ return b.(*bytes.Buffer)
+}
+
+// Put returns a buffer into the pool.
+func Put(b *bytes.Buffer) {
+ b.Reset()
+ bpool.Put(b)
+}
diff --git a/vendor/nhooyr.io/websocket/internal/errd/wrap.go b/vendor/nhooyr.io/websocket/internal/errd/wrap.go
new file mode 100644
index 00000000..6e779131
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/internal/errd/wrap.go
@@ -0,0 +1,14 @@
+package errd
+
+import (
+ "fmt"
+)
+
+// Wrap wraps err with fmt.Errorf if err is non nil.
+// Intended for use with defer and a named error return.
+// Inspired by https://github.com/golang/go/issues/32676.
+func Wrap(err *error, f string, v ...interface{}) {
+ if *err != nil {
+ *err = fmt.Errorf(f+": %w", append(v, *err)...)
+ }
+}
diff --git a/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
new file mode 100644
index 00000000..26ffb456
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go
@@ -0,0 +1,170 @@
+// +build js
+
+// Package wsjs implements typed access to the browser javascript WebSocket API.
+//
+// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+package wsjs
+
+import (
+ "syscall/js"
+)
+
+func handleJSError(err *error, onErr func()) {
+ r := recover()
+
+ if jsErr, ok := r.(js.Error); ok {
+ *err = jsErr
+
+ if onErr != nil {
+ onErr()
+ }
+ return
+ }
+
+ if r != nil {
+ panic(r)
+ }
+}
+
+// New is a wrapper around the javascript WebSocket constructor.
+func New(url string, protocols []string) (c WebSocket, err error) {
+ defer handleJSError(&err, func() {
+ c = WebSocket{}
+ })
+
+ jsProtocols := make([]interface{}, len(protocols))
+ for i, p := range protocols {
+ jsProtocols[i] = p
+ }
+
+ c = WebSocket{
+ v: js.Global().Get("WebSocket").New(url, jsProtocols),
+ }
+
+ c.setBinaryType("arraybuffer")
+
+ return c, nil
+}
+
+// WebSocket is a wrapper around a javascript WebSocket object.
+type WebSocket struct {
+ v js.Value
+}
+
+func (c WebSocket) setBinaryType(typ string) {
+ c.v.Set("binaryType", string(typ))
+}
+
+func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() {
+ f := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
+ fn(args[0])
+ return nil
+ })
+ c.v.Call("addEventListener", eventType, f)
+
+ return func() {
+ c.v.Call("removeEventListener", eventType, f)
+ f.Release()
+ }
+}
+
+// CloseEvent is the type passed to a WebSocket close handler.
+type CloseEvent struct {
+ Code uint16
+ Reason string
+ WasClean bool
+}
+
+// OnClose registers a function to be called when the WebSocket is closed.
+func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) {
+ return c.addEventListener("close", func(e js.Value) {
+ ce := CloseEvent{
+ Code: uint16(e.Get("code").Int()),
+ Reason: e.Get("reason").String(),
+ WasClean: e.Get("wasClean").Bool(),
+ }
+ fn(ce)
+ })
+}
+
+// OnError registers a function to be called when there is an error
+// with the WebSocket.
+func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) {
+ return c.addEventListener("error", fn)
+}
+
+// MessageEvent is the type passed to a message handler.
+type MessageEvent struct {
+ // string or []byte.
+ Data interface{}
+
+ // There are more fields to the interface but we don't use them.
+ // See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent
+}
+
+// OnMessage registers a function to be called when the WebSocket receives a message.
+func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) {
+ return c.addEventListener("message", func(e js.Value) {
+ var data interface{}
+
+ arrayBuffer := e.Get("data")
+ if arrayBuffer.Type() == js.TypeString {
+ data = arrayBuffer.String()
+ } else {
+ data = extractArrayBuffer(arrayBuffer)
+ }
+
+ me := MessageEvent{
+ Data: data,
+ }
+ fn(me)
+
+ return
+ })
+}
+
+// Subprotocol returns the WebSocket subprotocol in use.
+func (c WebSocket) Subprotocol() string {
+ return c.v.Get("protocol").String()
+}
+
+// OnOpen registers a function to be called when the WebSocket is opened.
+func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) {
+ return c.addEventListener("open", fn)
+}
+
+// Close closes the WebSocket with the given code and reason.
+func (c WebSocket) Close(code int, reason string) (err error) {
+ defer handleJSError(&err, nil)
+ c.v.Call("close", code, reason)
+ return err
+}
+
+// SendText sends the given string as a text message
+// on the WebSocket.
+func (c WebSocket) SendText(v string) (err error) {
+ defer handleJSError(&err, nil)
+ c.v.Call("send", v)
+ return err
+}
+
+// SendBytes sends the given message as a binary message
+// on the WebSocket.
+func (c WebSocket) SendBytes(v []byte) (err error) {
+ defer handleJSError(&err, nil)
+ c.v.Call("send", uint8Array(v))
+ return err
+}
+
+func extractArrayBuffer(arrayBuffer js.Value) []byte {
+ uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer)
+ dst := make([]byte, uint8Array.Length())
+ js.CopyBytesToGo(dst, uint8Array)
+ return dst
+}
+
+func uint8Array(src []byte) js.Value {
+ uint8Array := js.Global().Get("Uint8Array").New(len(src))
+ js.CopyBytesToJS(uint8Array, src)
+ return uint8Array
+}
diff --git a/vendor/nhooyr.io/websocket/internal/xsync/go.go b/vendor/nhooyr.io/websocket/internal/xsync/go.go
new file mode 100644
index 00000000..7a61f27f
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/internal/xsync/go.go
@@ -0,0 +1,25 @@
+package xsync
+
+import (
+ "fmt"
+)
+
+// Go allows running a function in another goroutine
+// and waiting for its error.
+func Go(fn func() error) <-chan error {
+ errs := make(chan error, 1)
+ go func() {
+ defer func() {
+ r := recover()
+ if r != nil {
+ select {
+ case errs <- fmt.Errorf("panic in go fn: %v", r):
+ default:
+ }
+ }
+ }()
+ errs <- fn()
+ }()
+
+ return errs
+}
diff --git a/vendor/nhooyr.io/websocket/internal/xsync/int64.go b/vendor/nhooyr.io/websocket/internal/xsync/int64.go
new file mode 100644
index 00000000..a0c40204
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/internal/xsync/int64.go
@@ -0,0 +1,23 @@
+package xsync
+
+import (
+ "sync/atomic"
+)
+
+// Int64 represents an atomic int64.
+type Int64 struct {
+	// We do not use atomic.Load/StoreInt64 since they require 64 bit
+	// alignment, which is not guaranteed on 32 bit platforms, and we need 64 bit integers.
+ i atomic.Value
+}
+
+// Load loads the int64.
+func (v *Int64) Load() int64 {
+ i, _ := v.i.Load().(int64)
+ return i
+}
+
+// Store stores the int64.
+func (v *Int64) Store(i int64) {
+ v.i.Store(i)
+}
diff --git a/vendor/nhooyr.io/websocket/netconn.go b/vendor/nhooyr.io/websocket/netconn.go
new file mode 100644
index 00000000..64aadf0b
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/netconn.go
@@ -0,0 +1,166 @@
+package websocket
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "sync"
+ "time"
+)
+
+// NetConn converts a *websocket.Conn into a net.Conn.
+//
+// It's for tunneling arbitrary protocols over WebSockets.
+// Few users of the library will need this, but it's tricky to implement
+// correctly, so it's provided in the library.
+// See https://github.com/nhooyr/websocket/issues/100.
+//
+// Every Write to the net.Conn will correspond to a message write of
+// the given type on *websocket.Conn.
+//
+// The passed ctx bounds the lifetime of the net.Conn. If cancelled,
+// all reads and writes on the net.Conn will be cancelled.
+//
+// If a message is read that is not of the correct type, the connection
+// will be closed with StatusUnsupportedData and an error will be returned.
+//
+// Close will close the *websocket.Conn with StatusNormalClosure.
+//
+// When a deadline is hit, the connection will be closed. This is
+// different from most net.Conn implementations where only the
+// reading/writing goroutines are interrupted but the connection is kept alive.
+//
+// The Addr methods will return a mock net.Addr that returns "websocket" for Network
+// and "websocket/unknown-addr" for String.
+//
+// A received StatusNormalClosure or StatusGoingAway close frame will be translated to
+// io.EOF when reading.
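+//
+// A minimal sketch of tunneling data over an established *Conn c
+// (error handling elided):
+//
+//	nc := NetConn(ctx, c, MessageBinary)
+//	defer nc.Close()
+//	nc.SetDeadline(time.Now().Add(time.Minute))
+//	nc.Write([]byte("hello"))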
+func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn {
+ nc := &netConn{
+ c: c,
+ msgType: msgType,
+ }
+
+ var cancel context.CancelFunc
+ nc.writeContext, cancel = context.WithCancel(ctx)
+ nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel)
+ if !nc.writeTimer.Stop() {
+ <-nc.writeTimer.C
+ }
+
+ nc.readContext, cancel = context.WithCancel(ctx)
+ nc.readTimer = time.AfterFunc(math.MaxInt64, cancel)
+ if !nc.readTimer.Stop() {
+ <-nc.readTimer.C
+ }
+
+ return nc
+}
+
+type netConn struct {
+ c *Conn
+ msgType MessageType
+
+ writeTimer *time.Timer
+ writeContext context.Context
+
+ readTimer *time.Timer
+ readContext context.Context
+
+ readMu sync.Mutex
+ eofed bool
+ reader io.Reader
+}
+
+var _ net.Conn = &netConn{}
+
+func (c *netConn) Close() error {
+ return c.c.Close(StatusNormalClosure, "")
+}
+
+func (c *netConn) Write(p []byte) (int, error) {
+ err := c.c.Write(c.writeContext, c.msgType, p)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (c *netConn) Read(p []byte) (int, error) {
+ c.readMu.Lock()
+ defer c.readMu.Unlock()
+
+ if c.eofed {
+ return 0, io.EOF
+ }
+
+ if c.reader == nil {
+ typ, r, err := c.c.Reader(c.readContext)
+ if err != nil {
+ switch CloseStatus(err) {
+ case StatusNormalClosure, StatusGoingAway:
+ c.eofed = true
+ return 0, io.EOF
+ }
+ return 0, err
+ }
+ if typ != c.msgType {
+ err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ)
+ c.c.Close(StatusUnsupportedData, err.Error())
+ return 0, err
+ }
+ c.reader = r
+ }
+
+ n, err := c.reader.Read(p)
+ if err == io.EOF {
+ c.reader = nil
+ err = nil
+ }
+ return n, err
+}
+
+type websocketAddr struct {
+}
+
+func (a websocketAddr) Network() string {
+ return "websocket"
+}
+
+func (a websocketAddr) String() string {
+ return "websocket/unknown-addr"
+}
+
+func (c *netConn) RemoteAddr() net.Addr {
+ return websocketAddr{}
+}
+
+func (c *netConn) LocalAddr() net.Addr {
+ return websocketAddr{}
+}
+
+func (c *netConn) SetDeadline(t time.Time) error {
+ c.SetWriteDeadline(t)
+ c.SetReadDeadline(t)
+ return nil
+}
+
+func (c *netConn) SetWriteDeadline(t time.Time) error {
+ if t.IsZero() {
+ c.writeTimer.Stop()
+ } else {
+ c.writeTimer.Reset(t.Sub(time.Now()))
+ }
+ return nil
+}
+
+func (c *netConn) SetReadDeadline(t time.Time) error {
+ if t.IsZero() {
+ c.readTimer.Stop()
+ } else {
+ c.readTimer.Reset(t.Sub(time.Now()))
+ }
+ return nil
+}
diff --git a/vendor/nhooyr.io/websocket/read.go b/vendor/nhooyr.io/websocket/read.go
new file mode 100644
index 00000000..ae05cf93
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/read.go
@@ -0,0 +1,474 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "nhooyr.io/websocket/internal/errd"
+ "nhooyr.io/websocket/internal/xsync"
+)
+
+// Reader reads from the connection until there is a WebSocket
+// data message to be read. It will handle ping, pong and close frames as appropriate.
+//
+// It returns the type of the message and an io.Reader to read it.
+// The passed context will also bound the reader.
+// Ensure you read to EOF otherwise the connection will hang.
+//
+// Call CloseRead if you do not expect any data messages from the peer.
+//
+// Only one Reader may be open at a time.
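+//
+// A minimal sketch of reading one message (an established *Conn c is
+// assumed):
+//
+//	_, r, err := c.Reader(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	b, err := ioutil.ReadAll(r) // read to EOF before the next Reader call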
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+ return c.reader(ctx)
+}
+
+// Read is a convenience method around Reader to read a single message
+// from the connection.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+ typ, r, err := c.Reader(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ b, err := ioutil.ReadAll(r)
+ return typ, b, err
+}
+
+// CloseRead starts a goroutine to read from the connection until it is closed
+// or a data message is received.
+//
+// Once CloseRead is called you cannot read any messages from the connection.
+// The returned context will be cancelled when the connection is closed.
+//
+// If a data message is received, the connection will be closed with StatusPolicyViolation.
+//
+// Call CloseRead when you do not expect to read any more messages.
+// Since it actively reads from the connection, it will ensure that ping, pong and close
+// frames are responded to. This means c.Ping and c.Close will still work as expected.
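+//
+// A sketch of a write-only connection (an established *Conn c is assumed):
+//
+//	ctx = c.CloseRead(ctx)
+//	// ... only write with c.Write(ctx, ...) from here on ...
+//	<-ctx.Done() // the connection was closed or ctx expired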
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ defer cancel()
+ c.Reader(ctx)
+ c.Close(StatusPolicyViolation, "unexpected data message")
+ }()
+ return ctx
+}
+
+// SetReadLimit sets the max number of bytes to read for a single message.
+// It applies to the Reader and Read methods.
+//
+// By default, the connection has a message read limit of 32768 bytes.
+//
+// When the limit is hit, the connection will be closed with StatusMessageTooBig.
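+//
+// For example, to allow messages up to 1 MiB:
+//
+//	c.SetReadLimit(1 << 20)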
+func (c *Conn) SetReadLimit(n int64) {
+ // We read one more byte than the limit in case
+ // there is a fin frame that needs to be read.
+ c.msgReader.limitReader.limit.Store(n + 1)
+}
+
+const defaultReadLimit = 32768
+
+func newMsgReader(c *Conn) *msgReader {
+ mr := &msgReader{
+ c: c,
+ fin: true,
+ }
+ mr.readFunc = mr.read
+
+ mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1)
+ return mr
+}
+
+func (mr *msgReader) resetFlate() {
+ if mr.flateContextTakeover() {
+ mr.dict.init(32768)
+ }
+ if mr.flateBufio == nil {
+ mr.flateBufio = getBufioReader(mr.readFunc)
+ }
+
+ mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf)
+ mr.limitReader.r = mr.flateReader
+ mr.flateTail.Reset(deflateMessageTail)
+}
+
+func (mr *msgReader) putFlateReader() {
+ if mr.flateReader != nil {
+ putFlateReader(mr.flateReader)
+ mr.flateReader = nil
+ }
+}
+
+func (mr *msgReader) close() {
+ mr.c.readMu.forceLock()
+ mr.putFlateReader()
+ mr.dict.close()
+ if mr.flateBufio != nil {
+ putBufioReader(mr.flateBufio)
+ }
+
+ if mr.c.client {
+ putBufioReader(mr.c.br)
+ mr.c.br = nil
+ }
+}
+
+func (mr *msgReader) flateContextTakeover() bool {
+ if mr.c.client {
+ return !mr.c.copts.serverNoContextTakeover
+ }
+ return !mr.c.copts.clientNoContextTakeover
+}
+
+func (c *Conn) readRSV1Illegal(h header) bool {
+ // If compression is disabled, rsv1 is illegal.
+ if !c.flate() {
+ return true
+ }
+ // rsv1 is only allowed on the data frames that begin a message.
+ if h.opcode != opText && h.opcode != opBinary {
+ return true
+ }
+ return false
+}
+
+func (c *Conn) readLoop(ctx context.Context) (header, error) {
+ for {
+ h, err := c.readFrameHeader(ctx)
+ if err != nil {
+ return header{}, err
+ }
+
+ if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 {
+ err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3)
+ c.writeError(StatusProtocolError, err)
+ return header{}, err
+ }
+
+ if !c.client && !h.masked {
+ return header{}, errors.New("received unmasked frame from client")
+ }
+
+ switch h.opcode {
+ case opClose, opPing, opPong:
+ err = c.handleControl(ctx, h)
+ if err != nil {
+ // Pass through CloseErrors when receiving a close frame.
+ if h.opcode == opClose && CloseStatus(err) != -1 {
+ return header{}, err
+ }
+ return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err)
+ }
+ case opContinuation, opText, opBinary:
+ return h, nil
+ default:
+ err := fmt.Errorf("received unknown opcode %v", h.opcode)
+ c.writeError(StatusProtocolError, err)
+ return header{}, err
+ }
+ }
+}
+
+func (c *Conn) readFrameHeader(ctx context.Context) (header, error) {
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case c.readTimeout <- ctx:
+ }
+
+ h, err := readFrameHeader(c.br, c.readHeaderBuf[:])
+ if err != nil {
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case <-ctx.Done():
+ return header{}, ctx.Err()
+ default:
+ c.close(err)
+ return header{}, err
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return header{}, c.closeErr
+ case c.readTimeout <- context.Background():
+ }
+
+ return h, nil
+}
+
+func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) {
+ select {
+ case <-c.closed:
+ return 0, c.closeErr
+ case c.readTimeout <- ctx:
+ }
+
+ n, err := io.ReadFull(c.br, p)
+ if err != nil {
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case <-ctx.Done():
+ return n, ctx.Err()
+ default:
+ err = fmt.Errorf("failed to read frame payload: %w", err)
+ c.close(err)
+ return n, err
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case c.readTimeout <- context.Background():
+ }
+
+ return n, err
+}
+
+func (c *Conn) handleControl(ctx context.Context, h header) (err error) {
+ if h.payloadLength < 0 || h.payloadLength > maxControlPayload {
+ err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength)
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ if !h.fin {
+ err := errors.New("received fragmented control frame")
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+
+ b := c.readControlBuf[:h.payloadLength]
+ _, err = c.readFramePayload(ctx, b)
+ if err != nil {
+ return err
+ }
+
+ if h.masked {
+ mask(h.maskKey, b)
+ }
+
+ switch h.opcode {
+ case opPing:
+ return c.writeControl(ctx, opPong, b)
+ case opPong:
+ c.activePingsMu.Lock()
+ pong, ok := c.activePings[string(b)]
+ c.activePingsMu.Unlock()
+ if ok {
+ select {
+ case pong <- struct{}{}:
+ default:
+ }
+ }
+ return nil
+ }
+
+ defer func() {
+ c.readCloseFrameErr = err
+ }()
+
+ ce, err := parseClosePayload(b)
+ if err != nil {
+ err = fmt.Errorf("received invalid close payload: %w", err)
+ c.writeError(StatusProtocolError, err)
+ return err
+ }
+
+ err = fmt.Errorf("received close frame: %w", ce)
+ c.setCloseErr(err)
+ c.writeClose(ce.Code, ce.Reason)
+ c.close(err)
+ return err
+}
+
+func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) {
+ defer errd.Wrap(&err, "failed to get reader")
+
+ err = c.readMu.lock(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ defer c.readMu.unlock()
+
+ if !c.msgReader.fin {
+ err = errors.New("previous message not read to completion")
+ c.close(fmt.Errorf("failed to get reader: %w", err))
+ return 0, nil, err
+ }
+
+ h, err := c.readLoop(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ if h.opcode == opContinuation {
+ err := errors.New("received continuation frame without text or binary frame")
+ c.writeError(StatusProtocolError, err)
+ return 0, nil, err
+ }
+
+ c.msgReader.reset(ctx, h)
+
+ return MessageType(h.opcode), c.msgReader, nil
+}
+
+type msgReader struct {
+ c *Conn
+
+ ctx context.Context
+ flate bool
+ flateReader io.Reader
+ flateBufio *bufio.Reader
+ flateTail strings.Reader
+ limitReader *limitReader
+ dict slidingWindow
+
+ fin bool
+ payloadLength int64
+ maskKey uint32
+
+ // readFunc caches readerFunc(mr.read) to avoid continuous allocations.
+ readFunc readerFunc
+}
+
+func (mr *msgReader) reset(ctx context.Context, h header) {
+ mr.ctx = ctx
+ mr.flate = h.rsv1
+ mr.limitReader.reset(mr.readFunc)
+
+ if mr.flate {
+ mr.resetFlate()
+ }
+
+ mr.setFrame(h)
+}
+
+func (mr *msgReader) setFrame(h header) {
+ mr.fin = h.fin
+ mr.payloadLength = h.payloadLength
+ mr.maskKey = h.maskKey
+}
+
+func (mr *msgReader) Read(p []byte) (n int, err error) {
+ err = mr.c.readMu.lock(mr.ctx)
+ if err != nil {
+ return 0, fmt.Errorf("failed to read: %w", err)
+ }
+ defer mr.c.readMu.unlock()
+
+ n, err = mr.limitReader.Read(p)
+ if mr.flate && mr.flateContextTakeover() {
+ p = p[:n]
+ mr.dict.write(p)
+ }
+ if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate {
+ mr.putFlateReader()
+ return n, io.EOF
+ }
+ if err != nil {
+ err = fmt.Errorf("failed to read: %w", err)
+ mr.c.close(err)
+ }
+ return n, err
+}
+
+func (mr *msgReader) read(p []byte) (int, error) {
+ for {
+ if mr.payloadLength == 0 {
+ if mr.fin {
+ if mr.flate {
+ return mr.flateTail.Read(p)
+ }
+ return 0, io.EOF
+ }
+
+ h, err := mr.c.readLoop(mr.ctx)
+ if err != nil {
+ return 0, err
+ }
+ if h.opcode != opContinuation {
+ err := errors.New("received new data message without finishing the previous message")
+ mr.c.writeError(StatusProtocolError, err)
+ return 0, err
+ }
+ mr.setFrame(h)
+
+ continue
+ }
+
+ if int64(len(p)) > mr.payloadLength {
+ p = p[:mr.payloadLength]
+ }
+
+ n, err := mr.c.readFramePayload(mr.ctx, p)
+ if err != nil {
+ return n, err
+ }
+
+ mr.payloadLength -= int64(n)
+
+ if !mr.c.client {
+ mr.maskKey = mask(mr.maskKey, p)
+ }
+
+ return n, nil
+ }
+}
+
+type limitReader struct {
+ c *Conn
+ r io.Reader
+ limit xsync.Int64
+ n int64
+}
+
+func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader {
+ lr := &limitReader{
+ c: c,
+ }
+ lr.limit.Store(limit)
+ lr.reset(r)
+ return lr
+}
+
+func (lr *limitReader) reset(r io.Reader) {
+ lr.n = lr.limit.Load()
+ lr.r = r
+}
+
+func (lr *limitReader) Read(p []byte) (int, error) {
+ if lr.n <= 0 {
+ err := fmt.Errorf("read limited at %v bytes", lr.limit.Load())
+ lr.c.writeError(StatusMessageTooBig, err)
+ return 0, err
+ }
+
+ if int64(len(p)) > lr.n {
+ p = p[:lr.n]
+ }
+ n, err := lr.r.Read(p)
+ lr.n -= int64(n)
+ return n, err
+}
+
+type readerFunc func(p []byte) (int, error)
+
+func (f readerFunc) Read(p []byte) (int, error) {
+ return f(p)
+}
diff --git a/vendor/nhooyr.io/websocket/stringer.go b/vendor/nhooyr.io/websocket/stringer.go
new file mode 100644
index 00000000..5a66ba29
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/stringer.go
@@ -0,0 +1,91 @@
+// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT.
+
+package websocket
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[opContinuation-0]
+ _ = x[opText-1]
+ _ = x[opBinary-2]
+ _ = x[opClose-8]
+ _ = x[opPing-9]
+ _ = x[opPong-10]
+}
+
+const (
+ _opcode_name_0 = "opContinuationopTextopBinary"
+ _opcode_name_1 = "opCloseopPingopPong"
+)
+
+var (
+ _opcode_index_0 = [...]uint8{0, 14, 20, 28}
+ _opcode_index_1 = [...]uint8{0, 7, 13, 19}
+)
+
+func (i opcode) String() string {
+ switch {
+ case 0 <= i && i <= 2:
+ return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]]
+ case 8 <= i && i <= 10:
+ i -= 8
+ return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]]
+ default:
+ return "opcode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[MessageText-1]
+ _ = x[MessageBinary-2]
+}
+
+const _MessageType_name = "MessageTextMessageBinary"
+
+var _MessageType_index = [...]uint8{0, 11, 24}
+
+func (i MessageType) String() string {
+ i -= 1
+ if i < 0 || i >= MessageType(len(_MessageType_index)-1) {
+ return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StatusNormalClosure-1000]
+ _ = x[StatusGoingAway-1001]
+ _ = x[StatusProtocolError-1002]
+ _ = x[StatusUnsupportedData-1003]
+ _ = x[statusReserved-1004]
+ _ = x[StatusNoStatusRcvd-1005]
+ _ = x[StatusAbnormalClosure-1006]
+ _ = x[StatusInvalidFramePayloadData-1007]
+ _ = x[StatusPolicyViolation-1008]
+ _ = x[StatusMessageTooBig-1009]
+ _ = x[StatusMandatoryExtension-1010]
+ _ = x[StatusInternalError-1011]
+ _ = x[StatusServiceRestart-1012]
+ _ = x[StatusTryAgainLater-1013]
+ _ = x[StatusBadGateway-1014]
+ _ = x[StatusTLSHandshake-1015]
+}
+
+const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake"
+
+var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312}
+
+func (i StatusCode) String() string {
+ i -= 1000
+ if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) {
+ return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")"
+ }
+ return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]]
+}
diff --git a/vendor/nhooyr.io/websocket/write.go b/vendor/nhooyr.io/websocket/write.go
new file mode 100644
index 00000000..2210cf81
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/write.go
@@ -0,0 +1,397 @@
+// +build !js
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+
+ "nhooyr.io/websocket/internal/errd"
+)
+
+// Writer returns a writer bounded by the context that will write
+// a WebSocket message of type dataType to the connection.
+//
+// You must close the writer once you have written the entire message.
+//
+// Only one writer can be open at a time; multiple calls will block until the
+// previous writer is closed.
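+//
+// A minimal sketch of streaming a message from an io.Reader src
+// (error handling elided):
+//
+//	w, _ := c.Writer(ctx, MessageBinary)
+//	io.Copy(w, src)
+//	w.Close() // flushes the final frame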
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ w, err := c.writer(ctx, typ)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get writer: %w", err)
+ }
+ return w, nil
+}
+
+// Write writes a message to the connection.
+//
+// See the Writer method if you want to stream a message.
+//
+// If compression is disabled or the threshold is not met, then it
+// will write the message in a single frame.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+ _, err := c.write(ctx, typ, p)
+ if err != nil {
+ return fmt.Errorf("failed to write msg: %w", err)
+ }
+ return nil
+}
+
+type msgWriter struct {
+ mw *msgWriterState
+ closed bool
+}
+
+func (mw *msgWriter) Write(p []byte) (int, error) {
+ if mw.closed {
+ return 0, errors.New("cannot use closed writer")
+ }
+ return mw.mw.Write(p)
+}
+
+func (mw *msgWriter) Close() error {
+ if mw.closed {
+ return errors.New("cannot use closed writer")
+ }
+ mw.closed = true
+ return mw.mw.Close()
+}
+
+type msgWriterState struct {
+ c *Conn
+
+ mu *mu
+ writeMu *mu
+
+ ctx context.Context
+ opcode opcode
+ flate bool
+
+ trimWriter *trimLastFourBytesWriter
+ dict slidingWindow
+}
+
+func newMsgWriterState(c *Conn) *msgWriterState {
+ mw := &msgWriterState{
+ c: c,
+ mu: newMu(c),
+ writeMu: newMu(c),
+ }
+ return mw
+}
+
+func (mw *msgWriterState) ensureFlate() {
+ if mw.trimWriter == nil {
+ mw.trimWriter = &trimLastFourBytesWriter{
+ w: writerFunc(mw.write),
+ }
+ }
+
+ mw.dict.init(8192)
+ mw.flate = true
+}
+
+func (mw *msgWriterState) flateContextTakeover() bool {
+ if mw.c.client {
+ return !mw.c.copts.clientNoContextTakeover
+ }
+ return !mw.c.copts.serverNoContextTakeover
+}
+
+func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ err := c.msgWriterState.reset(ctx, typ)
+ if err != nil {
+ return nil, err
+ }
+ return &msgWriter{
+ mw: c.msgWriterState,
+ closed: false,
+ }, nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) {
+ mw, err := c.writer(ctx, typ)
+ if err != nil {
+ return 0, err
+ }
+
+ if !c.flate() {
+ defer c.msgWriterState.mu.unlock()
+ return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p)
+ }
+
+ n, err := mw.Write(p)
+ if err != nil {
+ return n, err
+ }
+
+ err = mw.Close()
+ return n, err
+}
+
+func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error {
+ err := mw.mu.lock(ctx)
+ if err != nil {
+ return err
+ }
+
+ mw.ctx = ctx
+ mw.opcode = opcode(typ)
+ mw.flate = false
+
+ mw.trimWriter.reset()
+
+ return nil
+}
+
+// Write writes the given bytes to the WebSocket connection.
+func (mw *msgWriterState) Write(p []byte) (_ int, err error) {
+ err = mw.writeMu.lock(mw.ctx)
+ if err != nil {
+ return 0, fmt.Errorf("failed to write: %w", err)
+ }
+ defer mw.writeMu.unlock()
+
+ defer func() {
+ if err != nil {
+ err = fmt.Errorf("failed to write: %w", err)
+ mw.c.close(err)
+ }
+ }()
+
+ if mw.c.flate() {
+ // Only enable flate if the length crosses the
+ // threshold on the first frame.
+ if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold {
+ mw.ensureFlate()
+ }
+ }
+
+ if mw.flate {
+ err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf)
+ if err != nil {
+ return 0, err
+ }
+ mw.dict.write(p)
+ return len(p), nil
+ }
+
+ return mw.write(p)
+}
+
+func (mw *msgWriterState) write(p []byte) (int, error) {
+ n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p)
+ if err != nil {
+ return n, fmt.Errorf("failed to write data frame: %w", err)
+ }
+ mw.opcode = opContinuation
+ return n, nil
+}
+
+// Close flushes the frame to the connection.
+func (mw *msgWriterState) Close() (err error) {
+ defer errd.Wrap(&err, "failed to close writer")
+
+ err = mw.writeMu.lock(mw.ctx)
+ if err != nil {
+ return err
+ }
+ defer mw.writeMu.unlock()
+
+ _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil)
+ if err != nil {
+ return fmt.Errorf("failed to write fin frame: %w", err)
+ }
+
+ if mw.flate && !mw.flateContextTakeover() {
+ mw.dict.close()
+ }
+ mw.mu.unlock()
+ return nil
+}
+
+func (mw *msgWriterState) close() {
+ if mw.c.client {
+ mw.c.writeFrameMu.forceLock()
+ putBufioWriter(mw.c.bw)
+ }
+
+ mw.writeMu.forceLock()
+ mw.dict.close()
+}
+
+func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error {
+ ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+ defer cancel()
+
+ _, err := c.writeFrame(ctx, true, false, opcode, p)
+ if err != nil {
+ return fmt.Errorf("failed to write control frame %v: %w", opcode, err)
+ }
+ return nil
+}
+
+// writeFrame handles all writes to the connection.
+func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) {
+ err = c.writeFrameMu.lock(ctx)
+ if err != nil {
+ return 0, err
+ }
+ defer c.writeFrameMu.unlock()
+
+ // If the state says a close has already been written, we wait until
+ // the connection is closed and return that error.
+ //
+ // However, if the frame being written is a close frame, it's the close
+ // that set the state, so we let it go through.
+ c.closeMu.Lock()
+ wroteClose := c.wroteClose
+ c.closeMu.Unlock()
+ if wroteClose && opcode != opClose {
+ select {
+ case <-ctx.Done():
+ return 0, ctx.Err()
+ case <-c.closed:
+ return 0, c.closeErr
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return 0, c.closeErr
+ case c.writeTimeout <- ctx:
+ }
+
+ defer func() {
+ if err != nil {
+ select {
+ case <-c.closed:
+ err = c.closeErr
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ c.close(err)
+ err = fmt.Errorf("failed to write frame: %w", err)
+ }
+ }()
+
+ c.writeHeader.fin = fin
+ c.writeHeader.opcode = opcode
+ c.writeHeader.payloadLength = int64(len(p))
+
+ if c.client {
+ c.writeHeader.masked = true
+ _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4])
+ if err != nil {
+ return 0, fmt.Errorf("failed to generate masking key: %w", err)
+ }
+ c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:])
+ }
+
+ c.writeHeader.rsv1 = false
+ if flate && (opcode == opText || opcode == opBinary) {
+ c.writeHeader.rsv1 = true
+ }
+
+ err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:])
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := c.writeFramePayload(p)
+ if err != nil {
+ return n, err
+ }
+
+ if c.writeHeader.fin {
+ err = c.bw.Flush()
+ if err != nil {
+ return n, fmt.Errorf("failed to flush: %w", err)
+ }
+ }
+
+ select {
+ case <-c.closed:
+ return n, c.closeErr
+ case c.writeTimeout <- context.Background():
+ }
+
+ return n, nil
+}
+
+func (c *Conn) writeFramePayload(p []byte) (n int, err error) {
+ defer errd.Wrap(&err, "failed to write frame payload")
+
+ if !c.writeHeader.masked {
+ return c.bw.Write(p)
+ }
+
+ maskKey := c.writeHeader.maskKey
+ for len(p) > 0 {
+ // If the buffer is full, we need to flush.
+ if c.bw.Available() == 0 {
+ err = c.bw.Flush()
+ if err != nil {
+ return n, err
+ }
+ }
+
+ // Start of next write in the buffer.
+ i := c.bw.Buffered()
+
+ j := len(p)
+ if j > c.bw.Available() {
+ j = c.bw.Available()
+ }
+
+ _, err := c.bw.Write(p[:j])
+ if err != nil {
+ return n, err
+ }
+
+ maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()])
+
+ p = p[j:]
+ n += j
+ }
+
+ return n, nil
+}
+
+type writerFunc func(p []byte) (int, error)
+
+func (f writerFunc) Write(p []byte) (int, error) {
+ return f(p)
+}
+
+// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer
+// and returns it.
+func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte {
+ var writeBuf []byte
+ bw.Reset(writerFunc(func(p2 []byte) (int, error) {
+ writeBuf = p2[:cap(p2)]
+ return len(p2), nil
+ }))
+
+ bw.WriteByte(0)
+ bw.Flush()
+
+ bw.Reset(w)
+
+ return writeBuf
+}
+
+func (c *Conn) writeError(code StatusCode, err error) {
+ c.setCloseErr(err)
+ c.writeClose(code, err.Error())
+ c.close(nil)
+}
diff --git a/vendor/nhooyr.io/websocket/ws_js.go b/vendor/nhooyr.io/websocket/ws_js.go
new file mode 100644
index 00000000..b87e32cd
--- /dev/null
+++ b/vendor/nhooyr.io/websocket/ws_js.go
@@ -0,0 +1,379 @@
+package websocket // import "nhooyr.io/websocket"
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "syscall/js"
+
+ "nhooyr.io/websocket/internal/bpool"
+ "nhooyr.io/websocket/internal/wsjs"
+ "nhooyr.io/websocket/internal/xsync"
+)
+
+// Conn provides a wrapper around the browser WebSocket API.
+type Conn struct {
+ ws wsjs.WebSocket
+
+ // read limit for a message in bytes.
+ msgReadLimit xsync.Int64
+
+ closingMu sync.Mutex
+ isReadClosed xsync.Int64
+ closeOnce sync.Once
+ closed chan struct{}
+ closeErrOnce sync.Once
+ closeErr error
+ closeWasClean bool
+
+ releaseOnClose func()
+ releaseOnMessage func()
+
+ readSignal chan struct{}
+ readBufMu sync.Mutex
+ readBuf []wsjs.MessageEvent
+}
+
+func (c *Conn) close(err error, wasClean bool) {
+ c.closeOnce.Do(func() {
+ runtime.SetFinalizer(c, nil)
+
+ if !wasClean {
+ err = fmt.Errorf("unclean connection close: %w", err)
+ }
+ c.setCloseErr(err)
+ c.closeWasClean = wasClean
+ close(c.closed)
+ })
+}
+
+func (c *Conn) init() {
+ c.closed = make(chan struct{})
+ c.readSignal = make(chan struct{}, 1)
+
+ c.msgReadLimit.Store(32768)
+
+ c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) {
+ err := CloseError{
+ Code: StatusCode(e.Code),
+ Reason: e.Reason,
+ }
+ // We do not know if we sent or received this close as
+ // it's possible the browser triggered it without us
+ // explicitly sending it.
+ c.close(err, e.WasClean)
+
+ c.releaseOnClose()
+ c.releaseOnMessage()
+ })
+
+ c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) {
+ c.readBufMu.Lock()
+ defer c.readBufMu.Unlock()
+
+ c.readBuf = append(c.readBuf, e)
+
+ // Lets the read goroutine know there is definitely something in readBuf.
+ select {
+ case c.readSignal <- struct{}{}:
+ default:
+ }
+ })
+
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.setCloseErr(errors.New("connection garbage collected"))
+ c.closeWithInternal()
+ })
+}
+
+func (c *Conn) closeWithInternal() {
+ c.Close(StatusInternalError, "something went wrong")
+}
+
+// Read attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+ if c.isReadClosed.Load() == 1 {
+ return 0, nil, errors.New("WebSocket connection read closed")
+ }
+
+ typ, p, err := c.read(ctx)
+ if err != nil {
+ return 0, nil, fmt.Errorf("failed to read: %w", err)
+ }
+ if int64(len(p)) > c.msgReadLimit.Load() {
+ err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load())
+ c.Close(StatusMessageTooBig, err.Error())
+ return 0, nil, err
+ }
+ return typ, p, nil
+}
+
+func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) {
+ select {
+ case <-ctx.Done():
+ c.Close(StatusPolicyViolation, "read timed out")
+ return 0, nil, ctx.Err()
+ case <-c.readSignal:
+ case <-c.closed:
+ return 0, nil, c.closeErr
+ }
+
+ c.readBufMu.Lock()
+ defer c.readBufMu.Unlock()
+
+ me := c.readBuf[0]
+ // We copy the messages forward and decrease the size
+ // of the slice to avoid reallocating.
+ copy(c.readBuf, c.readBuf[1:])
+ c.readBuf = c.readBuf[:len(c.readBuf)-1]
+
+ if len(c.readBuf) > 0 {
+ // Next time we read, we'll grab the message.
+ select {
+ case c.readSignal <- struct{}{}:
+ default:
+ }
+ }
+
+ switch p := me.Data.(type) {
+ case string:
+ return MessageText, []byte(p), nil
+ case []byte:
+ return MessageBinary, p, nil
+ default:
+ panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String())
+ }
+}
+
+// Ping is mocked out for Wasm.
+func (c *Conn) Ping(ctx context.Context) error {
+ return nil
+}
+
+// Write writes a message of the given type to the connection.
+// It is always non-blocking.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+ err := c.write(ctx, typ, p)
+ if err != nil {
+ // Have to ensure the WebSocket is closed after a write error
+ // to match the Go API. It can only error if the message type
+ // is unexpected or the passed bytes contain invalid UTF-8 for
+ // MessageText.
+ err := fmt.Errorf("failed to write: %w", err)
+ c.setCloseErr(err)
+ c.closeWithInternal()
+ return err
+ }
+ return nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error {
+ if c.isClosed() {
+ return c.closeErr
+ }
+ switch typ {
+ case MessageBinary:
+ return c.ws.SendBytes(p)
+ case MessageText:
+ return c.ws.SendText(string(p))
+ default:
+ return fmt.Errorf("unexpected message type: %v", typ)
+ }
+}
+
+// Close closes the WebSocket with the given code and reason.
+// It will wait until the peer responds with a close frame
+// or the connection is closed.
+// It thus performs the full WebSocket close handshake.
+func (c *Conn) Close(code StatusCode, reason string) error {
+ err := c.exportedClose(code, reason)
+ if err != nil {
+ return fmt.Errorf("failed to close WebSocket: %w", err)
+ }
+ return nil
+}
+
+func (c *Conn) exportedClose(code StatusCode, reason string) error {
+ c.closingMu.Lock()
+ defer c.closingMu.Unlock()
+
+ ce := fmt.Errorf("sent close: %w", CloseError{
+ Code: code,
+ Reason: reason,
+ })
+
+ if c.isClosed() {
+ return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr)
+ }
+
+ c.setCloseErr(ce)
+ err := c.ws.Close(int(code), reason)
+ if err != nil {
+ return err
+ }
+
+ <-c.closed
+ if !c.closeWasClean {
+ return c.closeErr
+ }
+ return nil
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+ return c.ws.Subprotocol()
+}
+
+// DialOptions represents the options available to pass to Dial.
+type DialOptions struct {
+ // Subprotocols lists the subprotocols to negotiate with the server.
+ Subprotocols []string
+}
+
+// Dial creates a new WebSocket connection to the given url with the given options.
+// The passed context bounds the maximum time spent waiting for the connection to open.
+// The returned *http.Response is always nil or a mock. It's only in the signature
+// to match the core API.
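+//
+// A minimal sketch (the URL and subprotocol are illustrative):
+//
+//	c, _, err := Dial(ctx, "wss://example.com/ws", &DialOptions{
+//		Subprotocols: []string{"chat"},
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	defer c.Close(StatusNormalClosure, "")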
+func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+ c, resp, err := dial(ctx, url, opts)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err)
+ }
+ return c, resp, nil
+}
+
+func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+ if opts == nil {
+ opts = &DialOptions{}
+ }
+
+ url = strings.Replace(url, "http://", "ws://", 1)
+ url = strings.Replace(url, "https://", "wss://", 1)
+
+ ws, err := wsjs.New(url, opts.Subprotocols)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c := &Conn{
+ ws: ws,
+ }
+ c.init()
+
+ opench := make(chan struct{})
+ releaseOpen := ws.OnOpen(func(e js.Value) {
+ close(opench)
+ })
+ defer releaseOpen()
+
+ select {
+ case <-ctx.Done():
+ c.Close(StatusPolicyViolation, "dial timed out")
+ return nil, nil, ctx.Err()
+ case <-opench:
+ return c, &http.Response{
+ StatusCode: http.StatusSwitchingProtocols,
+ }, nil
+ case <-c.closed:
+ return nil, nil, c.closeErr
+ }
+}
+
+// Reader attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+ typ, p, err := c.Read(ctx)
+ if err != nil {
+ return 0, nil, err
+ }
+ return typ, bytes.NewReader(p), nil
+}
+
+// Writer returns a writer to write a WebSocket data message to the connection.
+// It buffers the entire message in memory and then sends it when the writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+ return writer{
+ c: c,
+ ctx: ctx,
+ typ: typ,
+ b: bpool.Get(),
+ }, nil
+}
+
+type writer struct {
+ closed bool
+
+ c *Conn
+ ctx context.Context
+ typ MessageType
+
+ b *bytes.Buffer
+}
+
+func (w writer) Write(p []byte) (int, error) {
+ if w.closed {
+ return 0, errors.New("cannot write to closed writer")
+ }
+ n, err := w.b.Write(p)
+ if err != nil {
+ return n, fmt.Errorf("failed to write message: %w", err)
+ }
+ return n, nil
+}
+
+func (w writer) Close() error {
+ if w.closed {
+ return errors.New("cannot close closed writer")
+ }
+ w.closed = true
+ defer bpool.Put(w.b)
+
+ err := w.c.Write(w.ctx, w.typ, w.b.Bytes())
+ if err != nil {
+ return fmt.Errorf("failed to close writer: %w", err)
+ }
+ return nil
+}
+
+// CloseRead implements *Conn.CloseRead for wasm.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+ c.isReadClosed.Store(1)
+
+ ctx, cancel := context.WithCancel(ctx)
+ go func() {
+ defer cancel()
+ c.read(ctx)
+ c.Close(StatusPolicyViolation, "unexpected data message")
+ }()
+ return ctx
+}
+
+// SetReadLimit implements *Conn.SetReadLimit for wasm.
+func (c *Conn) SetReadLimit(n int64) {
+ c.msgReadLimit.Store(n)
+}
+
+func (c *Conn) setCloseErr(err error) {
+ c.closeErrOnce.Do(func() {
+ c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+ })
+}
+
+func (c *Conn) isClosed() bool {
+ select {
+ case <-c.closed:
+ return true
+ default:
+ return false
+ }
+}